    test_dir = tmpdir.mkdir("profiles")

    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN)
    entry.add_to_hass(hass)

    assert await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    assert hass.services.has_service(DOMAIN, SERVICE_START)

    last_filename = None

    def _mock_path(filename):
        nonlocal last_filename
        last_filename = f"{test_dir}/{filename}"
        return last_filename

    with patch("homeassistant.components.profiler.cProfile.Profile"), patch.object(
        hass.config, "path", _mock_path
    ):
        await hass.services.async_call(DOMAIN, SERVICE_START, {CONF_SECONDS: 0.000001})
        await hass.async_block_till_done()

    assert os.path.exists(last_filename)

    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()


async def test_memory_usage(hass, tmpdir):
    """Test we can setup and the service is registered."""
    test_dir = tmpdir.mkdir("profiles")

    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN)
    entry.add_to_hass(hass)

    assert await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    assert hass.services.has_service(DOMAIN, SERVICE_MEMORY)

    last_filename = None

    def _mock_path(filename):
        nonlocal last_filename
        last_filename = f"{test_dir}/{filename}"
        return last_filename

    with patch("homeassistant.components.profiler.hpy") as mock_hpy, patch.object(
        hass.config, "path", _mock_path
    ):
        await hass.services.async_call(DOMAIN, SERVICE_MEMORY, {CONF_SECONDS: 0.000001})
        await hass.async_block_till_done()

        mock_hpy.assert_called_once()

    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()


async def test_object_growth_logging(hass, caplog):
    """Test we can setup and the service and we can dump objects to the log."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN)
    entry.add_to_hass(hass)

    assert await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    assert hass.services.has_service(DOMAIN, SERVICE_START_LOG_OBJECTS)
    assert hass.services.has_service(DOMAIN, SERVICE_STOP_LOG_OBJECTS)

    await hass.services.async_call(
        DOMAIN, SERVICE_START_LOG_OBJECTS, {CONF_SCAN_INTERVAL: 10}
    )
    await hass.async_block_till_done()

    assert "Growth" in caplog.text
    caplog.clear()

    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=11))
    await hass.async_block_till_done()
    assert "Growth" in caplog.text

    await hass.services.async_call(DOMAIN, SERVICE_STOP_LOG_OBJECTS, {})
    await hass.async_block_till_done()
    caplog.clear()

    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=21))
    await hass.async_block_till_done()
    assert "Growth" not in caplog.text

    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()

    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=31))
    await hass.async_block_till_done()
    assert "Growth" not in caplog.text


async def test_dump_log_object(hass, caplog):
    """Test we can setup and the service is registered and logging works."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    entry = MockConfigEntry(domain=DOMAIN)
    entry.add_to_hass(hass)

    assert await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    assert hass.services.has_service(DOMAIN, SERVICE_DUMP_LOG_OBJECTS)

    await hass.services.async_call(
        DOMAIN, SERVICE_DUMP_LOG_OBJECTS, {CONF_TYPE: "MockConfigEntry"}
    )
    await hass.async_block_till_done()

    assert "MockConfigEntry" in caplog.text
    caplog.clear()

    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()


#!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys


class Platform(object):
    pass


sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')


def sdkinfo(sdkname):
    ret = {}
    for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'],
                                 stdout=subprocess.PIPE).stdout:
        kv = line.strip().split(': ', 1)
        if len(kv) == 2:
            k, v = kv
            ret[k] = v
    return ret


desktop_sdk_info = sdkinfo('macosx')


def latest_sdks():
    latest_desktop = None
    for line in subprocess.Popen(['xcodebuild', '-showsdks'],
                                 stdout=subprocess.PIPE).stdout:
        match = sdk_re.match(line)
        if match:
            if 'OS X' in line:
                latest_desktop = match.group(1)
    return latest_desktop


desktop_sdk = latest_sdks()


class desktop_platform_32(Platform):
    sdk = 'macosx'
    arch = 'i386'
    name = 'mac32'
    triple = 'i386-apple-darwin10'
    sdkroot = desktop_sdk_info['Path']

    prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
    suffix = "\n\n#endif"


class desktop_platform_64(Platform):
    sdk = 'macosx'
    arch = 'x86_64'
    name = 'mac'
    triple = 'x86_64-apple-darwin10'
    sdkroot = desktop_sdk_info['Path']

    prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
    suffix = "\n\n#endif"


def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)

    out_filename = filename
    if file_suffix:
        split_name = os.path.splitext(filename)
        out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])

    with open(os.path.join(src_dir, filename)) as in_file:
        with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
            if prefix:
                out_file.write(prefix)
            out_file.write(in_file.read())
            if suffix:
                out_file.write(suffix)


headers_seen = collections.defaultdict(set)


def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None,
                     prefix=None, suffix=None):
    for root, dirs, files in os.walk(src_dir, followlinks=True):
        relroot = os.path.relpath(root, src_dir)

        def move_dir(arch, prefix='', suffix='', files=[]):
            for file in files:
                file_suffix = None
                if file.endswith('.h'):
                    if dest_include_dir:
                        file_suffix = arch
                        if arch:
                            headers_seen[file].add(arch)
                        move_file(root, dest_include_dir, file, arch,
                                  prefix=prefix, suffix=suffix)
                elif dest_dir:
                    outroot = os.path.join(dest_dir, relroot)
                    move_file(root, outroot, file, prefix=prefix, suffix=suffix)

        if relroot == '.':
            move_dir(arch=arch, files=files, prefix=prefix, suffix=suffix)
        elif relroot == 'x86':
            move_dir(arch='i386',
                     prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
                     suffix="\n\n#endif",
                     files=files)
            move_dir(arch='x86_64',
                     prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
                     suffix="\n\n#endif",
                     files=files)


def build_target(platform):
    def xcrun_cmd(cmd):
        return subprocess.check_output(
            ['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()

    build_dir = 'build_' + platform.name
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    env = dict(CC=xcrun_cmd('clang'),
               LD=xcrun_cmd('ld'),
               CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' %
                      (platform.arch, platform.sdkroot))
    working_dir = os.getcwd()
    try:
        os.chdir(build_dir)
        subprocess.check_call(['../configure', '-host', platform.triple], env=env)
        move_source_tree('.', None, '../osx/include',
                         arch=platform.arch,
                         prefix=platform.prefix,
                         suffix=platform.suffix)
        move_source_tree('./include', None, '../osx/include',
                         arch=platform.arch,
                         prefix=platform.prefix,
                         suffix=platform.suffix)
finally: os.chdir(working_dir) for header_name, archs in headers_seen.iteritems(): basename, suffix = os.path.splitext(header_name) def main(): move_source_tree('src', 'osx/src', 'osx/include') move_source_tree('include', None, 'osx/include') build_target(desktop_platform_32) build_target(desktop_platform_64) for header_name, archs in headers_seen.iteritems(): basename, suffix = os.path.splitext(header_name) with open(os.path.join('osx/include', header_name), 'w') as header: for arch in archs: header.write('#include <%s_%s%s>\n' % (basename, arch, suffix)) if __name__ == '__main__': main() from discord.ext import commands from .utils.dataIO import dataIO from .utils.chat_formatting import escape_mass_mentions from .utils import checks from collections import defaultdict from string import ascii_letters from random import choice import discord import os import re import aiohttp import asyncio import logging import json class StreamsError(Exception): pass class StreamNotFound(StreamsError): pass class APIError(StreamsError): pass class InvalidCredentials(StreamsError): pass class OfflineStream(StreamsError): pass class Streams: """Streams Alerts for a variety of streaming services""" def __init__(self, bot): self.bot = bot self.twitch_streams = dataIO.load_json("data/streams/twitch.json") self.hitbox_streams = dataIO.load_json("data/streams/hitbox.json") self.mixer_streams = dataIO.load_json("data/streams/beam.json") self.picarto_streams = dataIO.load_json("data/streams/picarto.json") settings = dataIO.load_json("data/streams/settings.json") self.settings = defaultdict(dict, settings) self.messages_cache = defaultdict(list) @commands.command() async def hitbox(self, stream: str): """Checks if hitbox stream is online""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(hitbox\.tv\/)' stream = re.sub(regex, '', stream) try: embed = await self.hitbox_online(stream) except OfflineStream: await self.bot.say(stream + " is offline.") except StreamNotFound: await self.bot.say("That stream doesn't exist.") except APIError: await self.bot.say("Error contacting the API.") else: await self.bot.say(embed=embed) @commands.command(pass_context=True) async def twitch(self, ctx, stream: str): """Checks if twitch stream is online""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(twitch\.tv\/)' stream = re.sub(regex, '', stream) try: data = await self.fetch_twitch_ids(stream, raise_if_none=True) embed = await self.twitch_online(data[0]["_id"]) except OfflineStream: await self.bot.say(stream + " is offline.") except StreamNotFound: await self.bot.say("That stream doesn't exist.") except APIError: await self.bot.say("Error contacting the API.") except InvalidCredentials: await self.bot.say("Owner: Client-ID is invalid or not set. 
" "See `{}streamset twitchtoken`" "".format(ctx.prefix)) else: await self.bot.say(embed=embed) @commands.command() async def mixer(self, stream: str): """Checks if mixer stream is online""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(mixer\.com\/)' stream = re.sub(regex, '', stream) try: embed = await self.mixer_online(stream) except OfflineStream: await self.bot.say(stream + " is offline.") except StreamNotFound: await self.bot.say("That stream doesn't exist.") except APIError: await self.bot.say("Error contacting the API.") else: await self.bot.say(embed=embed) @commands.command() async def picarto(self, stream: str): """Checks if picarto stream is online""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(picarto\.tv\/)' stream = re.sub(regex, '', stream) try: embed = await self.picarto_online(stream) except OfflineStream: await self.bot.say(stream + " is offline.") except StreamNotFound: await self.bot.say("That stream doesn't exist.") except APIError: await self.bot.say("Error contacting the API.") else: await self.bot.say(embed=embed) @commands.group(pass_context=True, no_pm=True) @checks.mod_or_permissions(manage_server=True) async def streamalert(self, ctx): """Adds/removes stream alerts from the current channel""" if ctx.invoked_subcommand is None: await self.bot.send_cmd_help(ctx) @streamalert.command(name="twitch", pass_context=True) async def twitch_alert(self, ctx, stream: str): """Adds/removes twitch alerts from the current channel""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(twitch\.tv\/)' stream = re.sub(regex, '', stream) channel = ctx.message.channel try: data = await self.fetch_twitch_ids(stream, raise_if_none=True) except StreamNotFound: await self.bot.say("That stream doesn't exist.") return except APIError: await self.bot.say("Error contacting the API.") return except InvalidCredentials: await self.bot.say("Owner: Client-ID is invalid or not set. " "See `{}streamset twitchtoken`" "".format(ctx.prefix)) return enabled = self.enable_or_disable_if_active(self.twitch_streams, stream, channel, _id=data[0]["_id"]) if enabled: await self.bot.say("Alert activated. I will notify this channel " "when {} is live.".format(stream)) else: await self.bot.say("Alert has been removed from this channel.") dataIO.save_json("data/streams/twitch.json", self.twitch_streams) @streamalert.command(name="hitbox", pass_context=True) async def hitbox_alert(self, ctx, stream: str): """Adds/removes hitbox alerts from the current channel""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(hitbox\.tv\/)' stream = re.sub(regex, '', stream) channel = ctx.message.channel try: await self.hitbox_online(stream) except StreamNotFound: await self.bot.say("That stream doesn't exist.") return except APIError: await self.bot.say("Error contacting the API.") return except OfflineStream: pass enabled = self.enable_or_disable_if_active(self.hitbox_streams, stream, channel) if enabled: await self.bot.say("Alert activated. 
I will notify this channel " "when {} is live.".format(stream)) else: await self.bot.say("Alert has been removed from this channel.") dataIO.save_json("data/streams/hitbox.json", self.hitbox_streams) @streamalert.command(name="mixer", pass_context=True) async def mixer_alert(self, ctx, stream: str): """Adds/removes mixer alerts from the current channel""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(mixer\.com\/)' stream = re.sub(regex, '', stream) channel = ctx.message.channel try: await self.mixer_online(stream) except StreamNotFound: await self.bot.say("That stream doesn't exist.") return except APIError: await self.bot.say("Error contacting the API.") return except OfflineStream: pass enabled = self.enable_or_disable_if_active(self.mixer_streams, stream, channel) if enabled: await self.bot.say("Alert activated. I will notify this channel " "when {} is live.".format(stream)) else: await self.bot.say("Alert has been removed from this channel.") dataIO.save_json("data/streams/beam.json", self.mixer_streams) @streamalert.command(name="picarto", pass_context=True) async def picarto_alert(self, ctx, stream: str): """Adds/removes picarto alerts from the current channel""" stream = escape_mass_mentions(stream) regex = r'^(https?\:\/\/)?(www\.)?(picarto\.tv\/)' stream = re.sub(regex, '', stream) channel = ctx.message.channel try: await self.picarto_online(stream) except StreamNotFound: await self.bot.say("That stream doesn't exist.") return except APIError: await self.bot.say("Error contacting the API.") return except OfflineStream: pass enabled = self.enable_or_disable_if_active(self.picarto_streams, stream, channel) if enabled: await self.bot.say("Alert activated. I will notify this channel " "when {} is live.".format(stream)) else: await self.bot.say("Alert has been removed from this channel.") dataIO.save_json("data/streams/picarto.json", self.picarto_streams) @streamalert.command(name="stop", pass_context=True) async def stop_alert(self, ctx): """Stops all streams alerts in the current channel""" channel = ctx.message.channel streams = ( self.hitbox_streams, self.twitch_streams, self.mixer_streams, self.picarto_streams ) for stream_type in streams: to_delete = [] for s in stream_type: if channel.id in s["CHANNELS"]: s["CHANNELS"].remove(channel.id) if not s["CHANNELS"]: to_delete.append(s) for s in to_delete: stream_type.remove(s) dataIO.save_json("data/streams/twitch.json", self.twitch_streams) dataIO.save_json("data/streams/hitbox.json", self.hitbox_streams) dataIO.save_json("data/streams/beam.json", self.mixer_streams) dataIO.save_json("data/streams/picarto.json", self.picarto_streams) await self.bot.say("There will be no more stream alerts in this " "channel.") @commands.group(pass_context=True) async def streamset(self, ctx): """Stream settings""" if ctx.invoked_subcommand is None: await self.bot.send_cmd_help(ctx) @streamset.command() @checks.is_owner() async def twitchtoken(self, token : str): """Sets the Client ID for twitch To do this, follow these steps: 1. Go to this page: https://dev.twitch.tv/dashboard/apps. 2. Click 'Register Your Application' 3. Enter a name, set the OAuth Redirect URI to 'http://localhost', and select an Application Category of your choosing. 4. Click 'Register', and on the following page, copy the Client ID. 5. Paste the Client ID into this command. Done! 
""" self.settings["TWITCH_TOKEN"] = token dataIO.save_json("data/streams/settings.json", self.settings) await self.bot.say('Twitch Client-ID set.') @streamset.command(pass_context=True, no_pm=True) @checks.admin() async def mention(self, ctx, *, mention_type : str): """Sets mentions for stream alerts Types: everyone, here, none""" server = ctx.message.server mention_type = mention_type.lower() if mention_type in ("everyone", "here"): self.settings[server.id]["MENTION"] = "@" + mention_type await self.bot.say("When a stream is online @\u200b{} will be " "mentioned.".format(mention_type)) elif mention_type == "none": self.settings[server.id]["MENTION"] = "" await self.bot.say("Mentions disabled.") else: await self.bot.send_cmd_help(ctx) dataIO.save_json("data/streams/settings.json", self.settings) @streamset.command(pass_context=True, no_pm=True) @checks.admin() async def autodelete(self, ctx): """Toggles automatic notification deletion for streams that go offline""" server = ctx.message.server settings = self.settings[server.id] current = settings.get("AUTODELETE", True) settings["AUTODELETE"] = not current if settings["AUTODELETE"]: await self.bot.say("Notifications will be automatically deleted " "once the stream goes offline.") else: await self.bot.say("Notifications won't be deleted anymore.") dataIO.save_json("data/streams/settings.json", self.settings) async def hitbox_online(self, stream): url = "https://api.hitbox.tv/media/live/" + stream async with aiohttp.get(url) as r: data = await r.json(encoding='utf-8') if "livestream" not in data: raise StreamNotFound() elif data["livestream"][0]["media_is_live"] == "0": raise OfflineStream() elif data["livestream"][0]["media_is_live"] == "1": return self.hitbox_embed(data) raise APIError() async def twitch_online(self, stream): session = aiohttp.ClientSession() url = "https://api.twitch.tv/kraken/streams/" + stream header = { 'Client-ID': self.settings.get("TWITCH_TOKEN", ""), 'Accept': 'application/vnd.twitchtv.v5+json' } async with session.get(url, headers=header) as r: data = await r.json(encoding='utf-8') await session.close() if r.status == 200: if data["stream"] is None: raise OfflineStream() return self.twitch_embed(data) elif r.status == 400: raise InvalidCredentials() elif r.status == 404: raise StreamNotFound() else: raise APIError() async def mixer_online(self, stream): url = "https://mixer.com/api/v1/channels/" + stream async with aiohttp.get(url) as r: data = await r.json(encoding='utf-8') if r.status == 200: if data["online"] is True: return self.mixer_embed(data) else: raise OfflineStream() elif r.status == 404: raise StreamNotFound() else: raise APIError() async def picarto_online(self, stream): url = "https://api.picarto.tv/v1/channel/name/" + stream async with aiohttp.get(url) as r: data = await r.text(encoding='utf-8') if r.status == 200: data = json.loads(data) if data["online"] is True: return self.picarto_embed(data) else: raise OfflineStream() elif r.status == 404: raise StreamNotFound() else: raise APIError() async def fetch_twitch_ids(self, *streams, raise_if_none=False): def chunks(l): for i in range(0, len(l), 100): yield l[i:i + 100] base_url = "https://api.twitch.tv/kraken/users?login=" header = { 'Client-ID': self.settings.get("TWITCH_TOKEN", ""), 'Accept': 'application/vnd.twitchtv.v5+json' } results = [] for streams_list in chunks(streams): session = aiohttp.ClientSession() url = base_url + ",".join(streams_list) async with session.get(url, headers=header) as r: data = await r.json(encoding='utf-8') if 
r.status == 200: results.extend(data["users"]) elif r.status == 400: raise InvalidCredentials() else: raise APIError() await session.close() if not results and raise_if_none: raise StreamNotFound() return results def twitch_embed(self, data): channel = data["stream"]["channel"] url = channel["url"] logo = channel["logo"] if logo is None: logo = "https://static-cdn.jtvnw.net/jtv_user_pictures/xarth/404_user_70x70.png" status = channel["status"] if not status: status = "Untitled broadcast" embed = discord.Embed(title=status, url=url) embed.set_author(name=channel["display_name"]) embed.add_field(name="Followers", value=channel["followers"]) embed.add_field(name="Total views", value=channel["views"]) embed.set_thumbnail(url=logo) if data["stream"]["preview"]["medium"]: embed.set_image(url=data["stream"]["preview"]["medium"] + self.rnd_attr()) if channel["game"]: embed.set_footer(text="Playing: " + channel["game"]) embed.color = 0x6441A4 return embed def hitbox_embed(self, data): base_url = "https://edge.sf.hitbox.tv" livestream = data["livestream"][0] channel = livestream["channel"] url = channel["channel_link"] embed = discord.Embed(title=livestream["media_status"], url=url) embed.set_author(name=livestream["media_name"]) embed.add_field(name="Followers", value=channel["followers"]) #embed.add_field(name="Views", value=channel["views"]) embed.set_thumbnail(url=base_url + channel["user_logo"]) if livestream["media_thumbnail"]: embed.set_image(url=base_url + livestream["media_thumbnail"] + self.rnd_attr()) embed.set_footer(text="Playing: " + livestream["category_name"]) embed.color = 0x98CB00 return embed def mixer_embed(self, data): default_avatar = ("https://mixer.com/_latest/assets/images/main/" "avatars/default.jpg") user = data["user"] url = "https://mixer.com/" + data["token"] embed = discord.Embed(title=data["name"], url=url) embed.set_author(name=user["username"]) embed.add_field(name="Followers", value=data["numFollowers"]) embed.add_field(name="Total views", value=data["viewersTotal"]) if user["avatarUrl"]: embed.set_thumbnail(url=user["avatarUrl"]) else: embed.set_thumbnail(url=default_avatar) if data["thumbnail"]: embed.set_image(url=data["thumbnail"]["url"] + self.rnd_attr()) embed.color = 0x4C90F3 if data["type"] is not None: embed.set_footer(text="Playing: " + data["type"]["name"]) return embed def picarto_embed(self, data): avatar = ("https://picarto.tv/user_data/usrimg/{}/dsdefault.jpg{}" "".format(data["name"].lower(), self.rnd_attr())) url = "https://picarto.tv/" + data["name"] thumbnail = data["thumbnails"]["web"] embed = discord.Embed(title=data["title"], url=url) embed.set_author(name=data["name"]) embed.set_image(url=thumbnail + self.rnd_attr()) embed.add_field(name="Followers", value=data["followers"]) embed.add_field(name="Total views", value=data["viewers_total"]) embed.set_thumbnail(url=avatar) embed.color = 0x132332 data["tags"] = ", ".join(data["tags"]) if not data["tags"]: data["tags"] = "None" if data["adult"]: data["adult"] = "NSFW | " else: data["adult"] = "" embed.color = 0x4C90F3 embed.set_footer(text="{adult}Category: {category} | Tags: {tags}" "".format(**data)) return embed def enable_or_disable_if_active(self, streams, stream, channel, _id=None): """Returns True if enabled or False if disabled""" for i, s in enumerate(streams): stream_id = s.get("ID") if stream_id and _id: # ID is available, matching by ID is if stream_id != _id: # preferable continue else: # ID unavailable, matching by name if s["NAME"] != stream: continue if channel.id in s["CHANNELS"]: 
streams[i]["CHANNELS"].remove(channel.id) if not s["CHANNELS"]: streams.remove(s) return False else: streams[i]["CHANNELS"].append(channel.id) return True data = {"CHANNELS": [channel.id], "NAME": stream, "ALREADY_ONLINE": False} if _id: data["ID"] = _id streams.append(data) return True async def stream_checker(self): CHECK_DELAY = 60 try: await self._migration_twitch_v5() except InvalidCredentials: print("Error during convertion of twitch usernames to IDs: " "invalid token") except Exception as e: print("Error during convertion of twitch usernames to IDs: " "{}".format(e)) while self == self.bot.get_cog("Streams"): save = False streams = ((self.twitch_streams, self.twitch_online), (self.hitbox_streams, self.hitbox_online), (self.mixer_streams, self.mixer_online), (self.picarto_streams, self.picarto_online)) for streams_list, parser in streams: if parser == self.twitch_online: _type = "ID" else: _type = "NAME" for stream in streams_list: if _type not in stream: continue key = (parser, stream[_type]) try: embed = await parser(stream[_type]) except OfflineStream: if stream["ALREADY_ONLINE"]: stream["ALREADY_ONLINE"] = False save = True await self.delete_old_notifications(key) except: # We don't want our task to die continue else: if stream["ALREADY_ONLINE"]: continue save = True stream["ALREADY_ONLINE"] = True messages_sent = [] for channel_id in stream["CHANNELS"]: channel = self.bot.get_channel(channel_id) if channel is None: continue mention = self.settings.get(channel.server.id, {}).get("MENTION", "") can_speak = channel.permissions_for(channel.server.me).send_messages message = mention + " {} is live!".format(stream["NAME"]) if channel and can_speak: m = await self.bot.send_message(channel, message, embed=embed) messages_sent.append(m) self.messages_cache[key] = messages_sent await asyncio.sleep(0.5) if save: dataIO.save_json("data/streams/twitch.json", self.twitch_streams) dataIO.save_json("data/streams/hitbox.json", self.hitbox_streams) dataIO.save_json("data/streams/beam.json", self.mixer_streams) dataIO.save_json("data/streams/picarto.json", self.picarto_streams) await asyncio.sleep(CHECK_DELAY) async def delete_old_notifications(self, key): for message in self.messages_cache[key]: server = message.server settings = self.settings.get(server.id, {}) is_enabled = settings.get("AUTODELETE", True) try: if is_enabled: await self.bot.delete_message(message) except: pass del self.messages_cache[key] def rnd_attr(self): """Avoids Discord's caching""" return "?rnd=" + "".join([choice(ascii_letters) for i in range(6)]) async def _migration_twitch_v5(self): # Migration of old twitch streams to API v5 to_convert = [] for stream in self.twitch_streams: if "ID" not in stream: to_convert.append(stream["NAME"]) if not to_convert: return results = await self.fetch_twitch_ids(*to_convert) for stream in self.twitch_streams: for result in results: if stream["NAME"].lower() == result["name"].lower(): stream["ID"] = result["_id"] # We might as well delete the invalid / renamed ones self.twitch_streams = [s for s in self.twitch_streams if "ID" in s] dataIO.save_json("data/streams/twitch.json", self.twitch_streams) def check_folders(): if not os.path.exists("data/streams"): print("Creating data/streams folder...") os.makedirs("data/streams") def check_files(): stream_files = ( "twitch.json", "hitbox.json", "beam.json", "picarto.json" ) for filename in stream_files: if not dataIO.is_valid_json("data/streams/" + filename): print("Creating empty {}...".format(filename)) dataIO.save_json("data/streams/" + 
                filename, [])

    f = "data/streams/settings.json"
    if not dataIO.is_valid_json(f):
        print("Creating empty settings.json...")
        dataIO.save_json(f, {})


def setup(bot):
    logger = logging.getLogger('aiohttp.client')
    logger.setLevel(50)  # Stops warning spam
    check_folders()
    check_files()
    n = Streams(bot)
    loop = asyncio.get_event_loop()
    loop.create_task(n.stream_checker())
    bot.add_cog(n)


# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint


def execute(filters=None):
    if not filters:
        filters = {}

    days_since_last_order = filters.get("days_since_last_order")
    if cint(days_since_last_order) <= 0:
        webnotes.msgprint("Please mention positive value in 'Days Since Last Order' field",
            raise_exception=1)

    columns = get_columns()
    customers = get_so_details()

    data = []
    for cust in customers:
        if cint(cust[8]) >= cint(days_since_last_order):
            cust.insert(7, get_last_so_amt(cust[0]))
            data.append(cust)

    return columns, data


def get_so_details():
    return webnotes.conn.sql("""select
        cust.name, cust.customer_name, cust.territory, cust.customer_group,
        count(distinct(so.name)) as 'num_of_order',
        sum(net_total) as 'total_order_value',
        sum(if(so.status = "Stopped",
            so.net_total * so.per_delivered/100,
            so.net_total)) as 'total_order_considered',
        max(so.transaction_date) as 'last_sales_order_date',
        DATEDIFF(CURDATE(), max(so.transaction_date)) as 'days_since_last_order'
        from `tabCustomer` cust, `tabSales Order` so
        where cust.name = so.customer and so.docstatus = 1
        group by cust.name
        order by 'days_since_last_order' desc """, as_list=1)


def get_last_so_amt(customer):
    res = webnotes.conn.sql("""select net_total from `tabSales Order`
        where customer ='%(customer)s' and docstatus = 1
        order by transaction_date desc
        limit 1""" % {'customer': customer})

    return res and res[0][0] or 0


def get_columns():
    return [
        "Customer:Link/Customer:120",
        "Customer Name:Data:120",
        "Territory::120",
        "Customer Group::120",
        "Number of Order::120",
        "Total Order Value:Currency:120",
        "Total Order Considered:Currency:160",
        "Last Order Amount:Currency:160",
        "Last Sales Order Date:Date:160",
        "Days Since Last Order::160"
    ]


# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
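# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sources): a minimal
# pytest-style check showing how the HomePage page object defined below is
# typically driven. The `base_url` and `selenium` fixture names and the
# `open()` helper inherited from the base page are assumptions based on a
# common pypom/pytest-selenium setup, not details confirmed by this file.
def _example_home_page_smoke(base_url, selenium):
    page = HomePage(selenium, base_url).open()  # open() assumed from the pypom base Page
    # The accounts-button properties below return plain booleans, while the
    # download-button properties wrap their elements in DownloadButton regions.
    assert page.is_primary_accounts_button or page.is_secondary_accounts_button
    return page.primary_download_button
# ---------------------------------------------------------------------------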
from selenium.webdriver.common.by import By

from pages.base import BasePage
from pages.regions.download_button import DownloadButton


class HomePage(BasePage):

    _intro_download_button_locator = (By.ID, 'download-intro')  # legacy home page
    _primary_download_button_locator = (By.ID, 'download-primary')
    _secondary_download_button_locator = (By.ID, 'download-secondary')
    _primary_accounts_button_locator = (By.ID, 'fxa-learn-primary')
    _secondary_accounts_button_locator = (By.ID, 'fxa-learn-secondary')

    @property
    def intro_download_button(self):
        el = self.find_element(*self._intro_download_button_locator)
        return DownloadButton(self, root=el)

    @property
    def primary_download_button(self):
        el = self.find_element(*self._primary_download_button_locator)
        return DownloadButton(self, root=el)

    @property
    def secondary_download_button(self):
        el = self.find_element(*self._secondary_download_button_locator)
        return DownloadButton(self, root=el)

    @property
    def is_primary_accounts_button(self):
        return self.is_element_displayed(*self._primary_accounts_button_locator)

    @property
    def is_secondary_accounts_button(self):
        return self.is_element_displayed(*self._secondary_accounts_button_locator)


from .. import constants, logger
from . import base_classes, utilities, api


class Material(base_classes.BaseNode):
    """Class that wraps material nodes"""

    def __init__(self, node, parent):
        logger.debug("Material().__init__(%s)", node)
        base_classes.BaseNode.__init__(self, node, parent, constants.MATERIAL)

        self._common_attributes()
        if self[constants.TYPE] == constants.THREE_PHONG:
            self._phong_attributes()

        textures = self.parent.options.get(constants.MAPS)
        if textures:
            self._update_maps()

    def _common_attributes(self):
        """Parse the common material attributes"""
        logger.debug('Material()._common_attributes()')
        dispatch = {
            constants.PHONG: constants.THREE_PHONG,
            constants.LAMBERT: constants.THREE_LAMBERT,
            constants.BASIC: constants.THREE_BASIC
        }
        shader_type = api.material.type(self.node)
        self[constants.TYPE] = dispatch[shader_type]

        diffuse = api.material.diffuse_color(self.node)
        self[constants.COLOR] = utilities.rgb2int(diffuse)

        if self[constants.TYPE] != constants.THREE_BASIC:
            emissive = api.material.emissive_color(self.node)
            self[constants.EMISSIVE] = utilities.rgb2int(emissive)

        vertex_color = api.material.use_vertex_colors(self.node)
        if vertex_color:
            self[constants.VERTEX_COLORS] = constants.VERTEX_COLORS_ON
        else:
            self[constants.VERTEX_COLORS] = constants.VERTEX_COLORS_OFF

        self[constants.BLENDING] = api.material.blending(self.node)

        if api.material.transparent(self.node):
            self[constants.TRANSPARENT] = True

        if api.material.double_sided(self.node):
            self[constants.SIDE] = constants.SIDE_DOUBLE

        self[constants.DEPTH_TEST] = api.material.depth_test(self.node)
        self[constants.DEPTH_WRITE] = api.material.depth_write(self.node)

    def _phong_attributes(self):
        """Parse phong specific attributes"""
        logger.debug("Material()._phong_attributes()")
        specular = api.material.specular_color(self.node)
        self[constants.SPECULAR] = utilities.rgb2int(specular)
        self[constants.SHININESS] = api.material.specular_coef(self.node)

    def _update_maps(self):
        """Parses maps/textures and updates the
        textures array with any new nodes found.
""" logger.debug("Material()._update_maps()") mapping = ( (api.material.diffuse_map, constants.MAP), (api.material.specular_map, constants.SPECULAR_MAP), (api.material.light_map, constants.LIGHT_MAP) ) for func, key in mapping: map_node = func(self.node) if map_node: logger.info('Found map node %s for %s', map_node, key) tex_inst = self.scene.texture(map_node.name) self[key] = tex_inst[constants.UUID] if self[constants.TYPE] == constants.THREE_PHONG: mapping = ( (api.material.bump_map, constants.BUMP_MAP, constants.BUMP_SCALE, api.material.bump_scale), (api.material.normal_map, constants.NORMAL_MAP, constants.NORMAL_SCALE, api.material.normal_scale) ) for func, map_key, scale_key, scale_func in mapping: map_node = func(self.node) if not map_node: continue logger.info("Found map node %s for %s", map_node, map_key) tex_inst = self.scene.texture(map_node.name) self[map_key] = tex_inst[constants.UUID] self[scale_key] = scale_func(self.node) # ca_wizard_mechanical, version 0.2 # Allows the generation of comm-files for simple 3D structural analyses in code_aster with an interactive GUI # # This work is licensed under the terms and conditions of the GNU General Public License version 3 # Copyright (C) 2017 Dominik Lechleitner # Contact: kaktus018(at)gmail.com # GitHub repository: https://github.com/kaktus018/ca_wizard_mechanical # # This file is part of ca_wizard_mechanical. # # ca_wizard_mechanical is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ca_wizard_mechanical is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ca_wizard_mechanical. If not, see . import xml.etree.ElementTree as ET from keyword import iskeyword from copy import deepcopy ls = "\n" def setMatLibPath(p): global matLibPath matLibPath = p def setVersion(v): global cawmVersion cawmVersion = v def isNumeric(*args): for s in args: try: float(s) except ValueError: return False return True def isInteger(*args): for s in args: try: int(s) except ValueError: return False return True def hasFunction(functionList,*args): for f in args: if f in [functionList[i].funcName for i in range(len(functionList))]: return True return False def hasConstant(functionList,*args): for c in args: if c not in [functionList[i].funcName for i in range(len(functionList))] and c: return True return False # check if string is either empty, a function or numeric - in which case: return True def checkValidEntry(functionList,*args): for el in args: if el and not hasFunction(functionList,el) and not isNumeric(el): return False return True class cawmInst: def __init__(self,solverSet,names,workingDir,studyName): self.solverSet = solverSet self.names = names self.workingDir = workingDir self.studyName = studyName self.cawmVersion = cawmVersion class PyFunction: def __init__(self,funcName,funcText): self.funcName = funcName self.funcText = funcText # verify function name and see if the interpreter raises an exception def verify(self,functionList,checkFuncDefi): msgs = [] if not self.funcName.isidentifier() or iskeyword(self.funcName): msgs.append(self.funcName + " is not a valid function name. 
The function will not be checked for further errors.") elif checkFuncDefi: try: exec("def " + self.funcName + "(x,y,z,t):\n" + self.funcText) except Exception as e: msgs.append("While trying to evaluate the Python function " + self.funcName + " the Python 3 interpreter raised the following exception:\n" + str(e)) return msgs class Material: def __init__(self,matName): root = ET.parse(matLibPath).getroot() for child in root: if child.attrib["name"] == matName: self.matName = matName self.matNum = child.find("materialNumber").text self.matCat = child.find("category").text self.youngsModulus = child.find("YoungsModulus").text self.poissonRatio = child.find("PoissonRatio").text self.alpha = child.find("alpha").text self.density = child.find("density").text return class MaterialSet: def __init__(self,assiName,nodalGroupName,materialName): self.assiName = assiName self.nodalGroupName = nodalGroupName self.material = Material(materialName) # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if not isNumeric(self.material.youngsModulus,self.material.poissonRatio): msgs.append(self.assiName + ": Young's modulus or Poisson's ratio is not numeric.") if not [self.nodalGroupName, "Volume"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == "whole mesh": msgs.append(self.assiName + ": Material is not assigned to a valid node group.") return msgs class NodeJointSet: def __init__(self,assiName,jointGroupName,nodeName,cX,cY,cZ,cPhiX,cPhiY,cPhiZ): self.assiName = assiName self.jointGroupName = jointGroupName self.nodeName = nodeName self.cX = cX self.cY = cY self.cZ = cZ self.cPhiX = cPhiX self.cPhiY = cPhiY self.cPhiZ = cPhiZ # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if not isNumeric(self.cX, self.cY, self.cZ, self.cPhiX, self.cPhiY, self.cPhiZ): msgs.append(self.assiName + ": At least one stiffness value is not numeric.") if not [self.jointGroupName, "Node joint group"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Node group name for the node joint group is not valid.") if not [self.nodeName, "Vertex/Node"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Node group name for the node is not valid.") return msgs class RestraintSet: def __init__(self,assiName,nodalGroupName,rotMatViaPython,deltaX,deltaY,deltaZ,deltaPhiX,deltaPhiY,deltaPhiZ,xTrans,yTrans,zTrans,rotX,rotY,rotZ,reacMX,reacMY,reacMZ): self.assiName = assiName self.nodalGroupName = nodalGroupName self.rotMatViaPython = rotMatViaPython self.deltaX = deltaX self.deltaY = deltaY self.deltaZ = deltaZ self.deltaPhiX = deltaPhiX self.deltaPhiY = deltaPhiY self.deltaPhiZ = deltaPhiZ self.xTrans = xTrans self.yTrans = yTrans self.zTrans = zTrans self.rotX = rotX self.rotY = rotY self.rotZ = rotZ self.reacMX = reacMX self.reacMY = reacMY self.reacMZ = reacMZ # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if self.rotMatViaPython: if hasFunction(functionList,self.deltaX,self.deltaY,self.deltaZ,self.deltaPhiX,self.deltaPhiY,self.deltaPhiZ): raise ValueError(self.assiName + ": When using the provided function for the rotation matrix the entries for the restraints can not be a function.") if (self.rotX and not (self.deltaX and self.deltaPhiY and self.deltaPhiZ)) or (self.rotY and not(self.deltaY and self.deltaPhiX and self.deltaPhiZ)) or \ (self.rotZ and not (self.deltaZ and self.deltaPhiX and self.deltaPhiY)): raise 
ValueError(self.assiName + ": When using the provided function for the rotation matrix the translational DoFs for all axes to which the rotation is applied to have to be restrained.") if not isNumeric(self.deltaPhiX, self.deltaPhiY, self.deltaPhiZ, self.xTrans, self.yTrans, self.zTrans): msgs.append(self.assiName + ": Inputs for the rotational DoFs and the coordinates of the rotation center have to be numeric. (All rotational DoFs have to be restrained).") if not checkValidEntry(functionList,self.deltaX,self.deltaY,self.deltaZ,self.deltaPhiX,self.deltaPhiY,self.deltaPhiZ): msgs.append(self.assiName + ": At least one input for translation or rotation is neither a function nor numeric. If this is related to the rotational DoFs and the restraint is not assigned to " + \ "a node of a node joint group you can ignore this warning.") if not isNumeric(self.reacMX, self.reacMY, self.reacMZ): msgs.append(self.assiName + ": At least one input for the coordinates for the computation of the torsional reactions is not numeric.") if not [self.nodalGroupName, "Surface"] in [names[i] for i in range(len(names))] and not [self.nodalGroupName, "Edge"] in [names[i] for i in range(len(names))] and \ not [self.nodalGroupName, "Vertex/Node"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Restraint is not assigned to a valid node group.") return msgs class LoadSet: def __init__(self,assiName,nodalGroupName,loadType,FX,FY,FZ,MX,MY,MZ,p,gX,gY,gZ,omega,centerX,centerY,centerZ,axisX,axisY,axisZ): self.assiName = assiName self.nodalGroupName = nodalGroupName self.loadType = loadType self.FX = FX self.FY = FY self.FZ = FZ self.MX = MX self.MY = MY self.MZ = MZ self.p = p self.gX = gX self.gY = gY self.gZ = gZ self.omega = omega self.centerX = centerX self.centerY = centerY self.centerZ = centerZ self.axisX = axisX self.axisY = axisY self.axisZ = axisZ # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if self.loadType == "Gravity": if not isNumeric(self.gX, self.gY, self.gZ): msgs.append(self.assiName + ": At least one input for the gravity vector is not numeric.") if not [self.nodalGroupName, "Volume"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == "whole mesh": msgs.append(self.assiName + ": Load is not assigned to a valid node group.") if self.loadType == "Centrifugal force": if not isNumeric(self.omega, self.centerX, self.centerY, self.centerZ, self.axisX, self.axisY, self.axisZ): msgs.append(self.assiName + ": At least one input for the rotation is not numeric.") if not [self.nodalGroupName, "Volume"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == "whole mesh": msgs.append(self.assiName + ": Load is not assigned to a valid node group.") if self.loadType == "Force on volume": if not checkValidEntry(functionList, self.FX, self.FY, self.FZ): msgs.append(self.assiName + ": At least one input for the force vector is neither a function nor numeric.") if not [self.nodalGroupName, "Volume"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == "whole mesh": msgs.append(self.assiName + ": Load is not assigned to a valid node group.") if self.loadType == "Force on face": if not checkValidEntry(functionList, self.FX, self.FY, self.FZ): msgs.append(self.assiName + ": At least one input for the force vector is neither a function nor numeric.") if not [self.nodalGroupName, "Surface"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Load is not assigned to a valid 
node group.") if self.loadType == "Force on edge": if not checkValidEntry(functionList, self.FX, self.FY, self.FZ): msgs.append(self.assiName + ": At least one input for the force vector is neither a function nor numeric.") if not [self.nodalGroupName, "Edge"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Load is not assigned to a valid node group.") if self.loadType == "Force on node": if not checkValidEntry(functionList, self.FX, self.FY, self.FZ, self.MX, self.MY, self.MZ): msgs.append(self.assiName + ": At least one input for the force or torque vector is neither a function nor numeric (if this message relates to the torque and the node" + \ "is not assigned to a node joint group, you can disregard this message).") if not self.nodalGroupName in [names[i][0] for i in range(len(names))] and not [self.nodalGroupName, "Node joint group"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Load is not assigned to a valid node group.") if self.loadType == "Pressure": if not checkValidEntry(functionList, self.p) or not self.p: msgs.append(self.assiName + ": Input for the pressure is neither a function nor numeric.") if not [self.nodalGroupName, "Surface"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Load is not assigned to a valid node group.") return msgs class ContactGlobalSetting: def __init__(self,formulationType,frictionModel,contactAlgo,frictionAlgo): self.formulationType = formulationType self.frictionModel = frictionModel self.contactAlgo = contactAlgo self.frictionAlgo = frictionAlgo class ContactSet: def __init__(self,assiName,masterName,slaveName,fricCoeff,contactAlgo,E_N,E_T,globalSettings): self.assiName = assiName self.masterName = masterName self.slaveName = slaveName self.fricCoeff = fricCoeff self.contactAlgo = contactAlgo self.E_N = E_N self.E_T = E_T self.globalSettings = globalSettings # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if self.globalSettings.formulationType == "discrete": if self.contactAlgo == "PENALISATION": if not isNumeric(self.E_N): msgs.append(self.assiName + ": E_N is not numeric.") if self.globalSettings.frictionModel == "Coulomb": if not isNumeric(self.E_T): msgs.append(self.assiName + ": E_T is not numeric.") if not isNumeric(self.fricCoeff): msgs.append(self.assiName + ": Friction coefficient is not numeric.") else: if self.globalSettings.frictionModel == "Coulomb": if not isNumeric(self.fricCoeff): msgs.append(self.assiName + ": Friction coefficient is not numeric.") if not [self.masterName, "Surface"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Master is not assigned to a valid node group.") if not [self.slaveName, "Surface"] in [names[i] for i in range(len(names))]: msgs.append(self.assiName + ": Slave is not assigned to a valid node group.") return msgs class ThermalSet: def __init__(self,assiName,nodalGroupName,assiType,deltaT,unite,T0,funStr): self.assiName = assiName self.nodalGroupName = nodalGroupName self.assiType = assiType self.deltaT = deltaT self.unite = unite self.T0 = T0 self.funStr = funStr # verify datatype of properties and node group name def verify(self,names,functionList): msgs = [] if self.assiType == "const": if not checkValidEntry(functionList, self.deltaT): msgs.append(self.assiName + ": \u0394T is neither a function nor numeric.") else: if not isNumeric(self.unite, self.T0): msgs.append(self.assiName + ": UNITE or T0 is not numeric.") if not [self.nodalGroupName, 
"Volume"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == "whole mesh": msgs.append(self.assiName + ": Temp. field is not assigned to a valid node group.") return msgs class OutputSet: def __init__(self,nodalGroupName,SIGM,SIEQ,EPS,REAC,ERME,TEMP): self.nodalGroupName = nodalGroupName self.SIGM = SIGM self.SIEQ = SIEQ self.EPS = EPS self.REAC = REAC self.ERME = ERME self.TEMP = TEMP class SolverSet: def __init__(self,analysisType,timeSteps,endTime,timeRampUp,timeRampDown,timeRampFunc,strainModel,method,resi,maxIter,functions,checkFuncDefis,materialSets,nodeJointSets,restraintSets,loadSets,contactSets, thermalSets,outputSet): self.analysisType = analysisType self.timeSteps = timeSteps self.endTime = endTime self.timeRampUp = timeRampUp self.timeRampDown = timeRampDown self.timeRampFunc = timeRampFunc self.strainModel = strainModel self.method = method self.resi = resi self.maxIter = maxIter self.functions = functions self.checkFuncDefis = checkFuncDefis self.materialSets = materialSets self.nodeJointSets = nodeJointSets self.restraintSets = restraintSets self.loadSets = loadSets self.contactSets = contactSets self.thermalSets = thermalSets self.outputSet = outputSet # this method will check if relevant inputs are numeric and all assignments to node groups are valid. It will NOT check in anyway if the resulting comm-file will run in code_aster! def verify(self,names,functionList): msgs = [] if len(self.materialSets) == 0 or len(self.restraintSets) == 0: msgs.extend(["The current setup has no material assignments and/or no restraint assignments."]) for el in self.functions: msgs.extend(el.verify(functionList,self.checkFuncDefis)) for el in self.materialSets + self.nodeJointSets + self.restraintSets + self.loadSets + self.contactSets + self.thermalSets: msgs.extend(el.verify(names,functionList)) if not isInteger(self.timeSteps): raise ValueError("The number of time steps is not of type integer.") if not isNumeric(self.endTime): msgs.extend(["The simulation end time is not numeric."]) if self.analysisType == "non-linear static": if not isInteger(self.maxIter): msgs.extend(["The number of max. iterations has to be of type integer."]) if not isNumeric(self.resi): msgs.extend(["Max. relative global residual is not numeric."]) if int(self.timeSteps) < 1: msgs.extend(["A non-linear analysis requires at least one time step."]) if self.timeRampUp and self.timeRampDown and not int(self.timeSteps) % 2 == 0: msgs.extend(["Ramping loads and restraints up AND down requires an even amount of time steps. Otherwise a computation with their max. values will not happen."]) if self.outputSet.ERME and len(self.nodeJointSets) > 0: msgs.extend(["Calculation of the error a posteriori (ERME) with code_aster version <= 13.2 can only be performed on the whole mesh. 
This will not work with the discrete element" + \ " of a node joint (MODELISATION='DIS_TR')."]) return msgs # generate string for comm-file def assembleCOMM(self): def getFormuleName(funcName): for el in formules: if el[1] == funcName: return el[0] return None pythonFuns = "" # If any restraints require the application of the roational matrix, add generic translation functions if sum([self.restraintSets[i].rotMatViaPython for i in range(len(self.restraintSets))]) > 0: pythonFuns = "# Generic translation functions:" + ls + "def translate_X(deltaX,phiY,phiZ,XTrans,YTrans,ZTrans,X,Y,Z):" + ls + \ " return deltaX+(X-XTrans)*cos(phiY)+(Z-ZTrans)*sin(phiY)+(X-XTrans)*cos(phiZ)-(Y-YTrans)*sin(phiZ)-2*(X-XTrans)" + ls + ls + \ "def translate_Y(deltaY,phiX,phiZ,XTrans,YTrans,ZTrans,X,Y,Z):" + ls + \ " return deltaY+(Y-YTrans)*cos(phiX)-(Z-ZTrans)*sin(phiX)+(Y-YTrans)*cos(phiZ)+(X-XTrans)*sin(phiZ)-2*(Y-YTrans)" + ls + ls + \ "def translate_Z(deltaZ,phiX,phiY,XTrans,YTrans,ZTrans,X,Y,Z):" + ls + \ " return deltaZ+(Z-ZTrans)*cos(phiX)+(Y-YTrans)*sin(phiX)+(Z-ZTrans)*cos(phiY)-(X-XTrans)*sin(phiY)-2*(Z-ZTrans)" + ls + ls # For restraints that use the generic translation functions defined above, add wrapper functions to the functions list restraintSetsLocal = deepcopy(self.restraintSets) # allow local modification of the restraint sets without compromising the original data functionsLocal = deepcopy(self.functions) # allow local modification of the functions list without compromising the original data for el in restraintSetsLocal: if el.rotMatViaPython: if not el.deltaX == "" or el.rotX: if el.rotX: phiY = str(float(el.deltaPhiY)) phiZ = str(float(el.deltaPhiZ)) else: phiY = "0.0" phiZ = "0.0" functionsLocal.append(PyFunction("DX_" + el.assiName, " return translate_X("+str(float(el.deltaX))+","+phiY+","+phiZ+","+ \ str(float(el.xTrans))+","+str(float(el.yTrans))+","+str(float(el.zTrans))+",x,y,z)")) el.deltaX = "DX_" + el.assiName if not el.deltaY == "" or el.rotY: if el.rotY: phiX = str(float(el.deltaPhiX)) phiZ = str(float(el.deltaPhiZ)) else: phiX = "0.0" phiZ = "0.0" functionsLocal.append(PyFunction("DY_" + el.assiName, " return translate_Y("+str(float(el.deltaY))+","+phiX+","+phiZ+","+ \ str(float(el.xTrans))+","+str(float(el.yTrans))+","+str(float(el.zTrans))+",x,y,z)")) el.deltaY = "DY_" + el.assiName if not el.deltaZ == "" or el.rotZ: if el.rotZ: phiX = str(float(el.deltaPhiX)) phiY = str(float(el.deltaPhiY)) else: phiX = "0.0" phiY = "0.0" functionsLocal.append(PyFunction("DZ_" + el.assiName, " return translate_Z("+str(float(el.deltaZ))+","+phiX+","+phiY+","+ \ str(float(el.xTrans))+","+str(float(el.yTrans))+","+str(float(el.zTrans))+",x,y,z)")) el.deltaZ = "DZ_" + el.assiName # Add all Python functions in the functions list to the comm-file if len(functionsLocal) > 0: pythonFuns = pythonFuns + "# User defined Python functions and wrappers for the generic translation functions" + ls + ls for el in functionsLocal: pythonFuns = pythonFuns + "def " + el.funcName + "(x,y,z,t):" + ls + el.funcText + ls + ls # DEBUT statement debutStr = ls + "# Start of code_aster commands" + ls + "DEBUT();" + ls + ls # list of time steps if int(self.timeSteps) > 0: tListStr = "# list of time steps" + ls + "TLIST=DEFI_LIST_REEL(DEBUT=0.0,INTERVALLE=_F(JUSQU_A=" + self.endTime + ",NOMBRE=" + self.timeSteps + ",),);" + ls + ls "SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(" if self.timeRampUp == 1 and self.timeRampDown == 1: tListStr = tListStr + "SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 0.0, 0.5, 1.0 ,1.0 ,0.0,),);" + 
ls + ls elif self.timeRampUp: tListStr = tListStr + "SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 0.0, 1.0, 1.0,),);" + ls + ls elif self.timeRampDown: tListStr = tListStr + "SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 1.0, 1.0, 0.0,),);" + ls + ls else: tListStr = "" # Bind all Python functions to corresponding code_aster formules formuleStr = "" formules = [] for el in functionsLocal: if formuleStr == "": formuleStr = "# Assign formules" + ls # store all identifiers for the formules in a list (max. 8 characters allowed for identifier -> can not use the actual function name) # formule name at index 0, function name at index 1 formules.append(["F" + str(len(formules)),el.funcName]) formuleStr = formuleStr + formules[-1][0] + "=FORMULE(VALE='" + el.funcName + "(X,Y,Z,INST)',NOM_PARA=('X','Y','Z','INST',),);" + ls if not formuleStr == "": formuleStr = formuleStr + ls # material definitions matDefiStr = "# Material definitions" + ls matDefiNames = [] # same here as with the formules - use short identifiers for the material definitions for el in self.materialSets: matDefiNames.append("MA"+str(len(matDefiNames))) matDefiStr = matDefiStr + matDefiNames[-1] + "=" + "DEFI_MATERIAU(ELAS=_F(E=" + el.material.youngsModulus + ",NU=" + el.material.poissonRatio + \ ",RHO=" + el.material.density + ",ALPHA=" + el.material.alpha + ",),);" + ls matDefiStr = matDefiStr + ls # reading/modifying the mesh meshName = "MAIL0" # reading meshStr = "# reading/modifying the mesh" + ls + meshName + "=LIRE_MAILLAGE(FORMAT='MED',);" + ls # create points for node joints if len(self.nodeJointSets) > 0: meshName = "MAIL1" meshStr = meshStr + meshName + "=CREA_MAILLAGE(MAILLAGE=MAIL0,CREA_POI1=(" for el in self.nodeJointSets: meshStr = meshStr + "_F(NOM_GROUP_MA='" + el.nodeName + "',GROUP_NO='" + el.nodeName + "',)," + ls meshStr = meshStr + "),);" + ls # mesh adaption for pressure loads and contacts groupMAStr = "" groupMAList = [] for el in self.loadSets: if el.loadType == "Pressure" and not el.nodalGroupName in groupMAList: groupMAStr = groupMAStr + "'" + el.nodalGroupName + "'," groupMAList.append(el.nodalGroupName) if self.analysisType == "non-linear static": for el in self.contactSets: if not el.masterName in groupMAList: groupMAStr = groupMAStr + "'" + el.masterName + "'," groupMAList.append(el.masterName) if not el.slaveName in groupMAList: groupMAStr = groupMAStr + "'" + el.slaveName + "'," groupMAList.append(el.slaveName) if not groupMAStr == "": meshStr = meshStr + meshName + "=MODI_MAILLAGE(reuse=" + meshName + ",MAILLAGE=" + meshName + ",ORIE_PEAU_3D=_F(GROUP_MA=(" + ls + \ groupMAStr + "),),);" + ls meshStr = meshStr + ls # create model modelStr = "# create model" + ls + "MODE=AFFE_MODELE(MAILLAGE=" + meshName + ",AFFE=(_F(TOUT='OUI',PHENOMENE='MECANIQUE',MODELISATION='3D',)," + ls groupMAStr = "" for el in self.nodeJointSets: groupMAStr = groupMAStr + ls + "'" + el.nodeName + "'," if not groupMAStr == "": modelStr = modelStr + "_F(GROUP_MA=(" + groupMAStr + ")," + ls + "PHENOMENE='MECANIQUE',MODELISATION='DIS_TR',)," modelStr = modelStr + "),);" + ls + ls # create temperature fields from constant or function tempFieldStr = "" tempFieldNames = [] if sum([self.thermalSets[i].assiType == "const" for i in range(len(self.thermalSets))]) > 0: tempFieldStr = "# Create temperature fields" + ls for el in self.thermalSets: if el.assiType == "const": tempFieldNames.append("TFld" + str(len(tempFieldNames))) tempFieldStr = tempFieldStr + tempFieldNames[-1] + "=CREA_CHAMP(TYPE_CHAM='NOEU_TEMP_" if 
hasFunction(functionsLocal,el.deltaT): tempFieldStr = tempFieldStr + "F" else: tempFieldStr = tempFieldStr + "R" tempFieldStr = tempFieldStr + "',OPERATION='AFFE',MODELE=MODE,AFFE=(_F(" if el.nodalGroupName == "whole mesh": tempFieldStr = tempFieldStr + "TOUT='OUI'" else: tempFieldStr = tempFieldStr + "GROUP_MA='" + el.nodalGroupName + "'" tempFieldStr = tempFieldStr + ",NOM_CMP='TEMP'" if hasFunction(functionsLocal,el.deltaT): tempFieldStr = tempFieldStr + ",VALE_F=" + getFormuleName(el.deltaT) else: tempFieldStr = tempFieldStr + ",VALE=" + el.deltaT tempFieldStr = tempFieldStr + ",),),);" + ls if not tempFieldStr == "": tempFieldStr = tempFieldStr + ls # create a code_aster-result for all temp. fields tempResStr = "" tempResNames = [] if len(tempFieldNames) > 0: tempResStr = "# Create results for all temperature fields" + ls for el in tempFieldNames: tempResNames.append("TRes" + str(len(tempResNames))) tempResStr = tempResStr + tempResNames[-1] + "=CREA_RESU(OPERATION='AFFE',TYPE_RESU='EVOL_THER',NOM_CHAM='TEMP',AFFE=_F(CHAM_GD=" + el + "," if int(self.timeSteps) > 0: tempResStr = tempResStr + "LIST_INST=TLIST" else: tempResStr = tempResStr + "INST=0.0" tempResStr = tempResStr + ",),);" + ls if not tempResStr == "": tempResStr = tempResStr + ls # create a code_aster-result for the temp. field from a med-file if sum([self.thermalSets[i].assiType == "file" for i in range(len(self.thermalSets))]) > 0: tempResStr = tempResStr + "# create result for the temperature field from a med-files" for el in self.thermalSets: if el.assiType == "file": tempResNames.append("TRes" + str(len(tempResNames))) tempResStr = tempResStr + tempResNames[-1] + "=LIRE_RESU(TYPE_RESU='EVOL_THER',FORMAT='MED'," + \ "MAILLAGE=" + meshName + "," + ls + "UNITE=" + el.unite + "," + \ "FORMAT_MED=_F(NOM_CHAM='TEMP',NOM_CHAM_MED='TEMP____TEMP',),TOUT_ORDRE='OUI',);" + ls if sum([self.thermalSets[i].assiType == "file" for i in range(len(self.thermalSets))]) > 0: tempResStr = tempResStr + ls # assign materials and temperature results matTempAssiStr = "# Assign materials and temp. 
results" + ls + "MATE=AFFE_MATERIAU(MAILLAGE=" + meshName + ",AFFE=(" + ls i=0 for el in self.materialSets: matTempAssiStr = matTempAssiStr + "_F(" if el.nodalGroupName == "whole mesh": matTempAssiStr = matTempAssiStr + "TOUT='OUI'," else: matTempAssiStr = matTempAssiStr + "GROUP_MA='" + el.nodalGroupName + "'," matTempAssiStr = matTempAssiStr + "MATER=" + matDefiNames[i] + ",)," + ls i = i+1 matTempAssiStr = matTempAssiStr + ")," i = 0 if len(self.thermalSets) > 0: matTempAssiStr = matTempAssiStr + "AFFE_VARC=(" + ls for el in self.thermalSets: matTempAssiStr = matTempAssiStr + "_F(" if el.nodalGroupName == "whole mesh": matTempAssiStr = matTempAssiStr + "TOUT='OUI'" else: matTempAssiStr = matTempAssiStr + "GROUP_MA='" + el.nodalGroupName + "'" matTempAssiStr = matTempAssiStr + ",NOM_VARC='TEMP',EVOL=" + tempResNames[i] + "," if el.assiType == "file": matTempAssiStr = matTempAssiStr + "VALE_REF=" + str(float(el.T0)) else: matTempAssiStr = matTempAssiStr + "VALE_REF=0.0" matTempAssiStr = matTempAssiStr + ",)," + ls i = i+1 if len(self.thermalSets) > 0: matTempAssiStr = matTempAssiStr + ")," matTempAssiStr = matTempAssiStr + ");" + ls + ls # assign properties for node joints caraStr = "" for el in self.nodeJointSets: if caraStr == "": caraStr = "# assign properties for node joints" + ls + "CARA=AFFE_CARA_ELEM(MODELE=MODE,DISCRET=(" caraStr = caraStr + "_F(CARA='K_TR_D_N',GROUP_MA='" + el.nodeName + "',VALE=(" + el.cX + "," + el.cY + "," + el.cZ + \ "," + el.cPhiX + "," + el.cPhiY + "," + el.cPhiZ + ",),)," + ls if not caraStr == "": caraStr = caraStr + "),);" + ls + ls # assign restraints/loads via formules affeCharMecaFStr = "" # restraints for el in restraintSetsLocal: hasFormulesTrans = hasFunction(functionsLocal,el.deltaX, el.deltaY, el.deltaZ) # at least one delta is not numeric (is a function) hasFormulesRot = 0 if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # restraint applied to a node of a node joint -> rotational DOFs hasFormulesRot = hasFunction(functionsLocal,el.deltaPhiX, el.deltaPhiY, el.deltaPhiZ) if hasFormulesTrans or hasFormulesRot: # restraint uses at least one function affeCharMecaFStr = affeCharMecaFStr + "_F(GROUP_NO='" + el.nodalGroupName + "'," if hasFunction(functionsLocal,el.deltaX): affeCharMecaFStr = affeCharMecaFStr + "DX=" + getFormuleName(el.deltaX) + "," if hasFunction(functionsLocal,el.deltaY): affeCharMecaFStr = affeCharMecaFStr + "DY=" + getFormuleName(el.deltaY) + "," if hasFunction(functionsLocal,el.deltaZ): affeCharMecaFStr = affeCharMecaFStr + "DZ=" + getFormuleName(el.deltaZ) + "," if hasFormulesRot: if hasFunction(functionsLocal,el.deltaPhiX): affeCharMecaFStr = affeCharMecaFStr + "DRX=" + getFormuleName(el.deltaPhiX) + "," if hasFunction(functionsLocal,el.deltaPhiY): affeCharMecaFStr = affeCharMecaFStr + "DRY=" + getFormuleName(el.deltaPhiY) + "," if hasFunction(functionsLocal,el.deltaPhiZ): affeCharMecaFStr = affeCharMecaFStr + "DRZ=" + getFormuleName(el.deltaPhiZ) + "," affeCharMecaFStr = affeCharMecaFStr + ")," + ls if not affeCharMecaFStr == "": affeCharMecaFStr = "DDL_IMPO=(" + ls + affeCharMecaFStr + ")," + ls # loads forceOnVolumeStr = "" forceOnFaceStr = "" forceOnEdgeStr = "" forceOnNodeStr = "" pressureStr = "" for el in self.loadSets: # forces/torques if el.loadType in ["Force on volume","Force on face","Force on edge","Force on node"]: hasFormulesForce = hasFunction(functionsLocal,el.FX,el.FY,el.FZ) hasFormulesTorque = 0 if el.loadType == "Force on node": if el.nodalGroupName in 
[self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # load applied to a node of a node joint -> torque assignment possible hasFormulesTorque = hasFunction(functionsLocal,el.MX,el.MY,el.MZ) if hasFormulesForce or hasFormulesTorque: if el.nodalGroupName == "whole mesh": assiStr = "TOUT='OUI'," elif el.loadType == "Force on node": assiStr = "GROUP_NO='" + el.nodalGroupName + "'," else: assiStr = "GROUP_MA='" + el.nodalGroupName + "'," tempStr = "_F(" + assiStr if hasFunction(functionsLocal,el.FX): tempStr = tempStr + "FX=" + getFormuleName(el.FX) + "," if hasFunction(functionsLocal,el.FY): tempStr = tempStr + "FY=" + getFormuleName(el.FY) + "," if hasFunction(functionsLocal,el.FZ): tempStr = tempStr + "FZ=" + getFormuleName(el.FZ) + "," if hasFormulesTorque: if hasFunction(functionsLocal,el.MX): tempStr = tempStr + "MX=" + getFormuleName(el.MX) + "," if hasFunction(functionsLocal,el.MY): tempStr = tempStr + "MY=" + getFormuleName(el.MY) + "," if hasFunction(functionsLocal,el.MZ): tempStr = tempStr + "MZ=" + getFormuleName(el.MZ) + "," tempStr = tempStr + ")," + ls if el.loadType == "Force on volume": forceOnVolumeStr = forceOnVolumeStr + tempStr elif el.loadType == "Force on face": forceOnFaceStr = forceOnFaceStr + tempStr elif el.loadType == "Force on edge": forceOnEdgeStr = forceOnEdgeStr + tempStr elif el.loadType == "Force on node": forceOnNodeStr = forceOnNodeStr + tempStr # pressures if el.loadType == "Pressure": if hasFunction(functionsLocal,el.p): pressureStr = pressureStr + "_F(GROUP_MA='" + el.nodalGroupName + "',PRES=" + getFormuleName(el.p) + ",)," + ls if not forceOnVolumeStr == "": affeCharMecaFStr = affeCharMecaFStr + "FORCE_INTERNE=(" + ls + forceOnVolumeStr + ")," + ls if not forceOnFaceStr == "": affeCharMecaFStr = affeCharMecaFStr + "FORCE_FACE=(" + ls + forceOnFaceStr + ")," + ls if not forceOnEdgeStr == "": affeCharMecaFStr = affeCharMecaFStr + "FORCE_ARETE=(" + ls + forceOnEdgeStr + ")," + ls if not forceOnNodeStr == "": affeCharMecaFStr = affeCharMecaFStr + "FORCE_NODALE=(" + ls + forceOnNodeStr + ")," + ls if not pressureStr == "": affeCharMecaFStr = affeCharMecaFStr + "PRES_REP=(" + ls + pressureStr + ")," + ls if not affeCharMecaFStr == "": affeCharMecaFStr = "# assign restraints/loads via formules" + ls + "CHARF=AFFE_CHAR_MECA_F(MODELE=MODE," + ls + affeCharMecaFStr + ");" + ls + ls # assign remaining restraints, node joints and loads affeCharMecaStr = "" # restraints for el in restraintSetsLocal: hasConstantsTrans = hasConstant(functionsLocal,el.deltaX, el.deltaY, el.deltaZ) # at least one delta is not a function hasConstantsRot = 0 if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # restraint applied to a node of a node joint -> rotational DOFs hasConstantsRot = hasConstant(functionsLocal,el.deltaPhiX, el.deltaPhiY, el.deltaPhiZ) if hasConstantsTrans or hasConstantsRot: # restraint uses at least one constant if not el.rotMatViaPython: affeCharMecaStr = affeCharMecaStr + "_F(GROUP_NO='" + el.nodalGroupName + "'," if hasConstant(functionsLocal,el.deltaX): affeCharMecaStr = affeCharMecaStr + "DX=" + el.deltaX + "," if hasConstant(functionsLocal,el.deltaY): affeCharMecaStr = affeCharMecaStr + "DY=" + el.deltaY + "," if hasConstant(functionsLocal,el.deltaZ): affeCharMecaStr = affeCharMecaStr + "DZ=" + el.deltaZ + "," if hasConstantsRot: if hasConstant(functionsLocal,el.deltaPhiX): affeCharMecaStr = affeCharMecaStr + "DRX=" + el.deltaPhiX + "," if hasConstant(functionsLocal,el.deltaPhiY): 
affeCharMecaStr = affeCharMecaStr + "DRY=" + el.deltaPhiY + "," if hasConstant(functionsLocal,el.deltaPhiZ): affeCharMecaStr = affeCharMecaStr + "DRZ=" + el.deltaPhiZ + "," affeCharMecaStr = affeCharMecaStr + ")," + ls if not affeCharMecaStr == "": affeCharMecaStr = "DDL_IMPO=(" + ls + affeCharMecaStr + ")," + ls # node joints nodeJointsStr = "" for el in self.nodeJointSets: nodeJointsStr = nodeJointsStr + "_F(GROUP_NO='" + el.jointGroupName + "',)," + ls # loads forceOnVolumeStr = "" forceOnFaceStr = "" forceOnEdgeStr = "" forceOnNodeStr = "" pressureStr = "" gravityStr = "" centrifugalForceStr = "" for el in self.loadSets: # forces/torques if el.loadType in ["Force on volume","Force on face","Force on edge","Force on node"]: hasConstantsForce = hasConstant(functionsLocal,el.FX,el.FY,el.FZ) hasConstantsTorque = 0 if el.loadType == "Force on node": if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # load applied to a node of a node joint -> torque assignment possible hasConstantsTorque = hasConstant(functionsLocal,el.MX,el.MY,el.MZ) if hasConstantsForce or hasConstantsTorque: if el.nodalGroupName == "whole mesh": assiStr = "TOUT='OUI'," elif el.loadType == "Force on node": assiStr = "GROUP_NO='" + el.nodalGroupName + "'," else: assiStr = "GROUP_MA='" + el.nodalGroupName + "'," tempStr = "_F(" + assiStr if hasConstant(functionsLocal,el.FX): tempStr = tempStr + "FX=" + el.FX + "," if hasConstant(functionsLocal,el.FY): tempStr = tempStr + "FY=" + el.FY + "," if hasConstant(functionsLocal,el.FZ): tempStr = tempStr + "FZ=" + el.FZ + "," if hasConstantsTorque: if hasConstant(functionsLocal,el.MX): tempStr = tempStr + "MX=" + el.MX + "," if hasConstant(functionsLocal,el.MY): tempStr = tempStr + "MY=" + gel.MY + "," if hasConstant(functionsLocal,el.MZ): tempStr = tempStr + "MZ=" + el.MZ + "," tempStr = tempStr + ")," + ls if el.loadType == "Force on volume": forceOnVolumeStr = forceOnVolumeStr + tempStr elif el.loadType == "Force on face": forceOnFaceStr = forceOnFaceStr + tempStr elif el.loadType == "Force on edge": forceOnEdgeStr = forceOnEdgeStr + tempStr elif el.loadType == "Force on node": forceOnNodeStr = forceOnNodeStr + tempStr # pressures if el.loadType == "Pressure": if hasConstant(functionsLocal,el.p): pressureStr = pressureStr + "_F(GROUP_MA='" + el.nodalGroupName + "',PRES=" + el.p + ",)," + ls # gravity if el.loadType == "Gravity": g = (float(el.gX)**2+float(el.gY)**2+float(el.gZ)**2)**0.5 dirStr = "(" + str(float(el.gX)/g) + "," + str(float(el.gY)/g) + "," + str(float(el.gZ)/g) + ",)" if el.nodalGroupName == "whole mesh": assiStr = "" else: assiStr = "GROUP_MA='" + el.nodalGroupName + "'," gravityStr = gravityStr + "_F(" + assiStr + "GRAVITE=" + str(g) + ",DIRECTION=" + dirStr + ",)," + ls # centrifugal forces if el.loadType == "Centrifugal force": if el.nodalGroupName == "whole mesh": assiStr = "TOUT='OUI'," else: assiStr = "GROUP_MA='" + el.nodalGroupName + "'," centrifugalForceStr = centrifugalForceStr + "_F(" + assiStr + "VITESSE=" + el.omega + ",AXE=(" + \ el.axisX + "," + el.axisY + "," + el.axisZ + ",),CENTRE=(" + el.centerX + "," + el.centerY + "," + el.centerZ + ",),)," + ls if not nodeJointsStr == "": affeCharMecaStr = affeCharMecaStr + "LIAISON_SOLIDE=(" + ls + nodeJointsStr + ")," + ls if not forceOnVolumeStr == "": affeCharMecaStr = affeCharMecaStr + "FORCE_INTERNE=(" + ls + forceOnVolumeStr + ")," + ls if not forceOnFaceStr == "": affeCharMecaStr = affeCharMecaStr + "FORCE_FACE=(" + ls + forceOnFaceStr + ")," + ls if not 
forceOnEdgeStr == "": affeCharMecaStr = affeCharMecaStr + "FORCE_ARETE=(" + ls + forceOnEdgeStr + ")," + ls if not forceOnNodeStr == "": affeCharMecaStr = affeCharMecaStr + "FORCE_NODALE=(" + ls + forceOnNodeStr + ")," + ls if not pressureStr == "": affeCharMecaStr = affeCharMecaStr + "PRES_REP=(" + ls + pressureStr + ")," + ls if not gravityStr == "": affeCharMecaStr = affeCharMecaStr + "PESANTEUR=(" + ls + gravityStr + ")," + ls if not centrifugalForceStr == "": affeCharMecaStr = affeCharMecaStr + "ROTATION=(" + ls + centrifugalForceStr + ")," + ls if not affeCharMecaStr == "": affeCharMecaStr = "# assign constant restraints/loads and node joints" + ls + "CHAR=AFFE_CHAR_MECA(MODELE=MODE," + ls + affeCharMecaStr + ");" + ls + ls # contact definition contactStr = "" if self.analysisType == "non-linear static" and len(self.contactSets) > 0: contactStr = "# contact definition" + ls +"CONT=DEFI_CONTACT(MODELE=MODE," if self.contactSets[0].globalSettings.formulationType == "discrete": contactStr = contactStr + "FORMULATION='DISCRETE'," else: contactStr = contactStr + "FORMULATION='CONTINUE',ALGO_RESO_CONT='" + self.contactSets[0].globalSettings.contactAlgo + "'," if self.contactSets[0].globalSettings.frictionModel == "Coulomb": contactStr = contactStr + "FROTTEMENT='COULOMB'," if self.contactSets[0].globalSettings.formulationType == "continuous": contactStr = contactStr + "ALGO_RESO_FROT='" + self.contactSets[0].globalSettings.frictionAlgo + "'," else: contactStr = contactStr + "FROTTEMENT='SANS'," contactStr = contactStr + "ZONE=" for el in self.contactSets: contactStr = contactStr + ls + "_F(GROUP_MA_MAIT='" + el.masterName + "',GROUP_MA_ESCL='" + el.slaveName + "'," if el.globalSettings.formulationType == "discrete": contactStr = contactStr + "ALGO_CONT='" + el.contactAlgo + "'," if el.contactAlgo == "PENALISATION": contactStr = contactStr + "E_N=" + el.E_N + "," if el.globalSettings.frictionModel == "Coulomb": contactStr = contactStr + "COULOMB=" + el.fricCoeff + "," + "ALGO_FROT='PENALISATION',E_T=" + el.E_T + "," else: if el.globalSettings.frictionModel == "Coulomb": contactStr = contactStr + "COULOMB=" + el.fricCoeff + "," contactStr = contactStr + ")," contactStr = contactStr + ");" + ls + ls # setting up and calling the solver if self.analysisType == "linear static": # MECA_STATIQUE solverStr = "# calling MECA_STATIQUE" + ls + "RESU=MECA_STATIQUE(MODELE=MODE,CHAM_MATER=MATE," if not caraStr == "": solverStr = solverStr + "CARA_ELEM=CARA," solverStr = solverStr + "EXCIT=(" if not affeCharMecaStr == "": solverStr = solverStr + "_F(CHARGE=CHAR," if not tListStr == "" and (self.timeRampUp or self.timeRampDown): solverStr = solverStr + "FONC_MULT=SF,)," else: solverStr = solverStr + ")," if not affeCharMecaFStr == "": solverStr = solverStr + "_F(CHARGE=CHARF," if not tListStr == "" and self.timeRampFunc and (self.timeRampUp or self.timeRampDown): solverStr = solverStr + "FONC_MULT=SF,)," else: solverStr = solverStr + ")," solverStr = solverStr + ")," if not tListStr == "": solverStr = solverStr + "LIST_INST=TLIST," solverStr = solverStr + "SOLVEUR=_F(METHODE='" + self.method + "',),);" + ls + ls else: # STAT_NON_LINE solverStr = "# calling STAT_NON_LINE" + ls + "RESU=STAT_NON_LINE(MODELE=MODE,CHAM_MATER=MATE," if not caraStr == "": solverStr = solverStr + "CARA_ELEM=CARA," solverStr = solverStr + "EXCIT=(" if not affeCharMecaStr == "": solverStr = solverStr + "_F(CHARGE=CHAR," if (self.timeRampUp or self.timeRampDown): solverStr = solverStr + "FONC_MULT=SF,)," else: solverStr = solverStr + 
")," if not affeCharMecaFStr == "": solverStr = solverStr + "_F(CHARGE=CHARF," if self.timeRampFunc and (self.timeRampUp or self.timeRampDown): solverStr = solverStr + "FONC_MULT=SF,)," else: solverStr = solverStr + ")," solverStr = solverStr + ")," if not contactStr == "": solverStr = solverStr + "CONTACT=CONT," + ls if self.strainModel == "Green-Lagrange": solverStr = solverStr + "COMPORTEMENT=_F(RELATION='ELAS',DEFORMATION='GROT_GDEP',)," + ls solverStr = solverStr + "NEWTON=_F(REAC_ITER=1,),INCREMENT=_F(LIST_INST=TLIST,),CONVERGENCE=_F(ITER_GLOB_MAXI=" + self.maxIter + ",RESI_GLOB_RELA=" + self.resi + \ "),SOLVEUR=_F(METHODE='" + self.method + "',),);" + ls + ls # compute quantities from result calcChampStr = "" if self.outputSet.SIGM + self.outputSet.EPS + self.outputSet.SIEQ + self.outputSet.REAC > 0: calcChampStr = "# compute output quantities" + ls + "RESU=CALC_CHAMP(reuse =RESU,RESULTAT=RESU," + ls if self.outputSet.SIGM: calcChampStr = calcChampStr + "CONTRAINTE=('SIGM_NOEU',)," + ls if self.outputSet.EPS: if self.strainModel == "Green-Lagrange" and self.analysisType == "non-linear static": calcChampStr = calcChampStr + "DEFORMATION=('EPSG_NOEU',)," + ls else: calcChampStr = calcChampStr + "DEFORMATION=('EPSI_NOEU',)," + ls if self.outputSet.SIEQ: calcChampStr = calcChampStr + "CRITERES=('SIEQ_NOEU',)," + ls if self.outputSet.REAC: calcChampStr = calcChampStr + "FORCE=('REAC_NODA',)," + ls calcChampStr = calcChampStr + ");" + ls + ls # estimate error erreurStr = "" if self.outputSet.ERME: erreurStr = "# error estimation a posteriori " + ls + "RESU=CALC_ERREUR(reuse=RESU,RESULTAT=RESU,OPTION=('ERME_ELEM',),);" + ls + ls # compute reactions at restraints reacStr = "" if self.outputSet.REAC and len(restraintSetsLocal) > 0: reacStr = "# integrate reactions at restraints" + ls + "Reac_Sum=POST_RELEVE_T(ACTION=(" for el in restraintSetsLocal: reacStr = reacStr + "_F(OPERATION='EXTRACTION',INTITULE='" + el.nodalGroupName + \ "',RESULTAT=RESU,NOM_CHAM='REAC_NODA',GROUP_NO=('" + el.nodalGroupName + "',),RESULTANTE=('DX','DY','DZ',),MOMENT=('DRX','DRY','DRY',)," + \ "POINT=(" + el.reacMX + "," + el.reacMY + "," + el.reacMZ + ",),)," reacStr = reacStr + "),);" + ls + ls + "IMPR_TABLE(TABLE=Reac_Sum,);" + ls + ls # write the results to file writeStr = "# write result to file (mechanical quantities)" + ls + "IMPR_RESU(FORMAT='MED',RESU=_F(RESULTAT=RESU,NOM_CHAM=('DEPL'," if self.outputSet.SIGM: writeStr = writeStr + "'SIGM_NOEU'," if self.outputSet.SIEQ: writeStr = writeStr + "'SIEQ_NOEU'," if self.outputSet.EPS: if self.strainModel == "Green-Lagrange" and self.analysisType == "non-linear static": writeStr = writeStr + "'EPSG_NOEU'," else: writeStr = writeStr + "'EPSI_NOEU'," if self.outputSet.REAC: writeStr = writeStr + "'REAC_NODA'," if self.outputSet.ERME: writeStr = writeStr + "'ERME_ELEM'," writeStr = writeStr + ")," if self.outputSet.nodalGroupName == "whole mesh": writeStr = writeStr + "TOUT='OUI',),);" + ls + ls else: writeStr = writeStr + "GROUP_MA='" + self.outputSet.nodalGroupName + "',),);" + ls + ls if self.outputSet.TEMP and len(self.thermalSets) > 0: writeStr = writeStr + "# write result to file (temperature)" + ls for el in tempResNames: writeStr = writeStr + "IMPR_RESU(FORMAT='MED',RESU=_F(RESULTAT=" + el + ",NOM_CHAM='TEMP',TOUT='OUI',),);" + ls writeStr = writeStr + ls # FIN statement finStr = "FIN();" # assemble everything commStr = pythonFuns + debutStr + tListStr + formuleStr + matDefiStr + meshStr + modelStr + tempFieldStr + tempResStr + matTempAssiStr + caraStr + 
affeCharMecaFStr + \ affeCharMecaStr + contactStr + solverStr + calcChampStr + erreurStr + reacStr + writeStr + finStr return commStr from django.conf.urls import patterns, include, url from pombola.south_africa.views import ( OldSectionRedirect, OldSpeechRedirect, SASpeechView, SASectionView, SAHansardIndex, SACommitteeIndex, SAQuestionIndex, SASpeakerRedirectView) # We add redirects for the old-style SayIt patterns, so that old # bookmarks aren't lost. For example, links to speeches should still # work, e.g. # # /question/speech/367721 # /hansard/speech/7606 # /committee/speech/318055 # -> http://www.pmg.org.za/report/20131008-auditor-general-key-challenges-in-agriculture-departments-audit-report-2013-minister-in-attendance # # (The last one should be a redirect.) Old-style links to SayIt # sections should still work too, e.g.: # # /question/59146 # /hansard/928 urlpatterns = [ url(r'^committee/(?P<pk>\d+)$', OldSectionRedirect.as_view()), url(r'^question/(?P<pk>\d+)$', OldSectionRedirect.as_view()), url(r'^hansard/(?P<pk>\d+)$', OldSectionRedirect.as_view()), url(r'^committee/speech/(?P<pk>\d+)$', OldSpeechRedirect.as_view()), url(r'^question/speech/(?P<pk>\d+)$', OldSpeechRedirect.as_view()), url(r'^hansard/speech/(?P<pk>\d+)$', OldSpeechRedirect.as_view()), ] # Make sure the top level custom indexes work: urlpatterns += patterns('', url(r'^hansard/?$', SAHansardIndex.as_view(), name='section-list-hansard'), url(r'^committee-minutes/?$', SACommitteeIndex.as_view(), name='section-list-committee-minutes'), url(r'^question/?$', SAQuestionIndex.as_view(), name='section-list-question'), ) # Anything else unmatched we assume is dealt with by SayIt (which will # return a 404 if the path is unknown anyway): fallback_sayit_patterns = patterns('', # Exposed endpoint for a speech referred to by a numeric ID: url(r'^speech/(?P<pk>\d+)$', SASpeechView.as_view(), name='speech-view'), # Fake endpoint to redirect to the right speaker: url(r'^speaker/(?P<slug>[-\w]+)$', SASpeakerRedirectView.as_view(), name='speaker-view'), # Anything else might be a slug referring to a section: url(r'^(?P<full_slug>.+)$', SASectionView.as_view(), name='section-view'), ) urlpatterns += patterns('', url(r'', include(fallback_sayit_patterns, namespace='sayit', app_name='speeches')), ) #!/usr/bin/python #title :check_sync.py #description :Checks Satellite 6 repository sync status #URL :https://github.com/RedHatSatellite/sat6_disconnected_tools #author :Geoff Gatward #notes :This script is NOT SUPPORTED by Red Hat Global Support Services. #license :GPLv3 #============================================================================== """Check the status of Sync tasks. Draws the user's attention to problems in the sync tasks that could lead to inconsistent repository states. Call with -l switch to loop until all sync tasks are complete, otherwise runs as a one-shot check. """ import sys, os, argparse, time import simplejson as json import helpers def check_running_tasks(clear): """Check for any currently running Sync tasks. Checks for any Synchronize tasks in running/paused or Incomplete state. """ #pylint: disable-msg=R0912,R0914,R0915 # Clear the screen if clear: os.system('clear') print helpers.HEADER + "Checking for running/paused yum sync tasks..." + helpers.ENDC tasks = helpers.get_p_json( helpers.FOREMAN_API + "tasks/", json.dumps( { "per_page": "100", } ) ) # From the list of tasks, look for any running export or sync jobs. # If we have any we exit, as we can't export in this state.
running_sync = 0 for task_result in tasks['results']: if task_result['state'] == 'running' and task_result['label'] != 'Actions::BulkAction': if task_result['humanized']['action'] == 'Synchronize': running_sync = 1 print helpers.BOLD + "Running: " + helpers.ENDC \ + task_result['input']['repository']['name'] if task_result['state'] == 'paused' and task_result['label'] != 'Actions::BulkAction': if task_result['humanized']['action'] == 'Synchronize': running_sync = 1 print helpers.ERROR + "Paused: " + helpers.ENDC \ + task_result['input']['repository']['name'] if not running_sync: print helpers.GREEN + "None detected" + helpers.ENDC # Check any repos marked as Sync Incomplete print helpers.HEADER + "\nChecking for incomplete (stopped) yum sync tasks..." + helpers.ENDC repo_list = helpers.get_json( helpers.KATELLO_API + "/content_view_versions") # Extract the list of repo ids, then check the state of each one. incomplete_sync = 0 for repo in repo_list['results']: for repo_id in repo['repositories']: repo_status = helpers.get_json( helpers.KATELLO_API + "/repositories/" + str(repo_id['id'])) if repo_status['content_type'] == 'yum': if repo_status['last_sync'] is None: if repo_status['library_instance_id'] is None: # incomplete_sync = 1 # print helpers.ERROR + "Broken Repo: " + helpers.ENDC + repo_status['name'] print helpers.WARNING + "Never Synchronized: " + helpers.ENDC + repo_status['name'] elif repo_status['last_sync']['state'] == 'stopped': if repo_status['last_sync']['result'] == 'warning': incomplete_sync = 1 print helpers.WARNING + "Incomplete: " + helpers.ENDC + repo_status['name'] else: msg = repo_status['name'] + " - last_sync: " + repo_status['last_sync']['ended_at'] helpers.log_msg(msg, 'DEBUG') # If we have detected incomplete sync tasks, ask the user if they want to export anyway. # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected sat. if not incomplete_sync: print helpers.GREEN + "No incomplete syncs detected\n" + helpers.ENDC else: print "\n" # Exit the loop if both tests are clear if not running_sync and not incomplete_sync: sys.exit(0) def main(args): """Check the status of Sync tasks.""" #pylint: disable-msg=R0914,R0915 parser = argparse.ArgumentParser(description='Checks status of yum repository sync tasks.') # pylint: disable=bad-continuation parser.add_argument('-l', '--loop', help='Loop check until all tasks complete', required=False, action="store_true") args = parser.parse_args() # Check if there are any currently running tasks that will conflict with an export # Loop until all tasks are compltete. if args.loop: try: while True: clear = True check_running_tasks(clear) time.sleep(5) except KeyboardInterrupt: print "End" else: clear = False check_running_tasks(clear) sys.exit(0) if __name__ == "__main__": try: main(sys.argv[1:]) except KeyboardInterrupt, e: print >> sys.stderr, ("\n\nExiting on user cancel.") sys.exit(1) """ Content negotiation deals with selecting an appropriate renderer given the incoming request. Typically this will be based on the request's Accept header. 
""" from __future__ import unicode_literals from django.http import Http404 from rest_framework import HTTP_HEADER_ENCODING, exceptions from rest_framework.settings import api_settings from rest_framework.utils.mediatypes import ( _MediaType, media_type_matches, order_by_precedence ) class BaseContentNegotiation(object): def select_parser(self, request, parsers): raise NotImplementedError('.select_parser() must be implemented') def select_renderer(self, request, renderers, format_suffix=None): raise NotImplementedError('.select_renderer() must be implemented') class DefaultContentNegotiation(BaseContentNegotiation): settings = api_settings def select_parser(self, request, parsers): """ Given a list of parsers and a media type, return the appropriate parser to handle the incoming request. """ for parser in parsers: if media_type_matches(parser.media_type, request.content_type): return parser return None def select_renderer(self, request, renderers, format_suffix=None): """ Given a request and a list of renderers, return a two-tuple of: (renderer, media type). """ # Allow URL style format override. eg. "?format=json format_query_param = self.settings.URL_FORMAT_OVERRIDE format = format_suffix or request.query_params.get(format_query_param) if format: renderers = self.filter_renderers(renderers, format) accepts = self.get_accept_list(request) # Check the acceptable media types against each renderer, # attempting more specific media types first # NB. The inner loop here isn't as bad as it first looks :) # Worst case is we're looping over len(accept_list) * len(self.renderers) for media_type_set in order_by_precedence(accepts): for renderer in renderers: for media_type in media_type_set: if media_type_matches(renderer.media_type, media_type): # Return the most specific media type as accepted. media_type_wrapper = _MediaType(media_type) if ( _MediaType(renderer.media_type).precedence > media_type_wrapper.precedence ): # Eg client requests '*/*' # Accepted media type is 'application/json' full_media_type = ';'.join( (renderer.media_type,) + tuple('{0}={1}'.format( key, value.decode(HTTP_HEADER_ENCODING)) for key, value in media_type_wrapper.params.items())) return renderer, full_media_type else: # Eg client requests 'application/json; indent=8' # Accepted media type is 'application/json; indent=8' return renderer, media_type raise exceptions.NotAcceptable(available_renderers=renderers) def filter_renderers(self, renderers, format): """ If there is a '.json' style format suffix, filter the renderers so that we only negotiation against those that accept that format. """ renderers = [renderer for renderer in renderers if renderer.format == format] if not renderers: raise Http404 return renderers def get_accept_list(self, request): """ Given the incoming request, return a tokenised list of media type strings. Allows URL style accept override. eg. 
"?accept=application/json" """ header = request.META.get('HTTP_ACCEPT', '*/*') header = request.query_params.get(self.settings.URL_ACCEPT_OVERRIDE, header) return [token.strip() for token in header.split(',')] # -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Comments.datetime' db.add_column(u'chat_comments', 'datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 4, 30, 0, 0), blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Comments.datetime' db.delete_column(u'chat_comments', 'datetime') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'chat.comments': { 'Meta': {'object_name': 'Comments'}, 'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': 
"(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['chat'] """ Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile import textwrap from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib.machinery as importlib_machinery else: importlib_machinery = None try: import parser except ImportError: pass try: import pkg_resources._vendor.packaging.version import pkg_resources._vendor.packaging.specifiers packaging = pkg_resources._vendor.packaging except ImportError: # fallback to naturally-installed version; allows system packagers to # omit vendored packages. import packaging.version import packaging.specifiers # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. 
""" class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. 
distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. 
""" _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. """ _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. 
""" if provided is None or required is None or provided==required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. """ # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. 
""" dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. 
required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) processed[req] = True # return list of distros to activate return to_activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. """ needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. 
This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """ % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. 
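For example (illustrative egg and resource names; the extraction root defaults to ``get_default_cache()``)::

            mgr = ResourceManager()
            path = mgr.get_cache_path('MyPkg-1.0-py2.7.egg', ('data', 'defaults.cfg'))
            # path ends with .../MyPkg-1.0-py2.7.egg-tmp/data/defaults.cfg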
`archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) 
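A common pattern when a private temporary directory is desired is to set the path before any extraction happens and register the cleanup hook; a minimal sketch::

            import atexit
            import tempfile

            mgr = ResourceManager()
            mgr.set_extraction_path(tempfile.mkdtemp())
            atexit.register(mgr.cleanup_resources)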
""" if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. """ return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': platform.python_version, 'python_version': lambda: platform.python_version()[:3], 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError as e: return cls.normalize_exception(e) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. 
""" subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.and_, items) @classmethod def test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.or_, items) @classmethod def atom(cls, nodelist): t = nodelist[1][0] if t == token.LPAR: if nodelist[2][0] == token.RPAR: raise SyntaxError("Empty parentheses") return cls.interpret(nodelist[2]) msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @classmethod def comparison(cls, nodelist): if len(nodelist) > 4: msg = "Chained comparison not allowed in environment markers" raise SyntaxError(msg) comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except KeyError: msg = repr(cop) + " operator not allowed in environment markers" raise SyntaxError(msg) return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) @classmethod def get_op(cls, op): ops = { symbol.test: cls.test, symbol.and_test: cls.and_test, symbol.atom: cls.atom, symbol.comparison: cls.comparison, 'not in': lambda x, y: x not in y, 'in': lambda x, y: x in y, '==': operator.eq, '!=': operator.ne, '<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge, } if hasattr(symbol, 'or_test'): ops[symbol.or_test] = cls.test return ops[op] @classmethod def evaluate_marker(cls, text, extra=None): """ Evaluate a PEP 426 environment marker on CPython 2.4+. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'parser' module, which is not implemented on Jython and has been superseded by the 'ast' module in Python 2.6 and later. """ return cls.interpret(parser.expr(text).totuple(1)[1]) @classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except NameError as e: raise SyntaxError(e.args[0]) return result if 'parser' not in globals(): # Fall back to less-complete _markerlib implementation if 'parser' module # is not available. 
evaluate_marker = _markerlib_evaluate @classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except KeyError: raise SyntaxError("Comparison or logical expression expected") return op(nodelist) @classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except KeyError: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if not cls._safe_string(s): raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @staticmethod def _safe_string(cand): return ( cand[:1] in "'\"" and not cand.startswith('"""') and not cand.startswith("'''") and '\\' not in cand ) invalid_marker = MarkerEvaluation.is_invalid_marker evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, '__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) if sys.version_info <= (3,): def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)) else: def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)).decode("utf-8") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/'+script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename,'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for 
unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path!=old: if path.lower().endswith('.egg'): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() register_loader_type(type(None), DefaultProvider) if importlib_machinery is not None: register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider) class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self, path: False _get = lambda self, path: '' _listdir = lambda self, path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with ContextualZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. 
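Repeated loads of an unchanged archive reuse the cached manifest; a sketch (the egg path is hypothetical)::

            manifests = MemoizedZipManifests()
            first = manifests.load('/path/to/some.egg')
            again = manifests.load('/path/to/some.egg')
            assert first is again  # cached while the file's mtime is unchanged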
""" path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ContextualZipFile(zipfile.ZipFile): """ Supplement ZipFile class to support context manager for Python 2.6 """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """ Construct a ZipFile or ContextualZipFile as appropriate """ if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive+os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. # pseudo-fs path fspath = self.zip_pre+zip_path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name=='nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. 
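This pairing is what ``find_on_path()`` below does for ``*.egg-info`` files; a sketch with illustrative paths::

        metadata = FileMetadata('/projects/demo/Demo.egg-info')
        dist = Distribution.from_location('/projects/demo', 'Demo.egg-info', metadata)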
""" def __init__(self, path): self.path = path def has_metadata(self, name): return name=='PKG-INFO' def get_metadata(self, name): if name=='PKG-INFO': with open(self.path,'rU') as f: metadata = f.read() return metadata raise KeyError("No metadata except PKG-INFO is available") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive+os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders = {}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
""" if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if subitem.endswith('.egg'): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if path_item.lower().endswith('.egg'): # unpacked egg yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item,'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory for entry in os.listdir(path_item): lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) elif not only and lower.endswith('.egg'): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if importlib_machinery is not None: register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent,()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if importlib_machinery is not None: register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in 
yield_lines(ss): yield s # whitespace and comment LINE_END = re.compile(r"\s*(#.*)?$").match # line continuation CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # Distribution or extra DISTRO = re.compile(r"\s*((\w|[-.])+)").match # ver. info VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match # comma between items COMMA = re.compile(r"\s*,").match OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is
None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return 
self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. 
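For example (any instance works, since only the class-level ``EQEQ`` pattern is consulted)::

            DistInfoDistribution()._preparse_requirement('Foobar (1); baz')
            # -> ('Foobar ==1', 'baz')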
""" parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): items = [] while not TERMINATOR(line, p): if CONTINUE(line, p): try: line = next(lines) p = 0 except StopIteration: msg = "\\ must not appear on the last nonblank line" raise RequirementParseError(msg) match = ITEM(line, p) if not match: msg = "Expected " + item_name + " in" raise RequirementParseError(msg, line, "at", line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line, p) if match: # skip the comma p = match.end() elif not TERMINATOR(line, p): msg = "Expected ',' or end-of-list in" raise RequirementParseError(msg, line, "at", line[p:]) match = TERMINATOR(line, p) # skip the terminator, if any if match: p = match.end() return line, p, items for line in lines: match = DISTRO(line) if not match: raise RequirementParseError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line, p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec") specs = [(op, val) for op, val in specs] yield Requirement(project_name, specs, extras) class Requirement: def __init__(self, project_name, specs, extras): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" self.unsafe_name, project_name = project_name, safe_name(project_name) self.project_name, self.key = project_name, project_name.lower() self.specifier = packaging.specifiers.SpecifierSet( ",".join(["".join([x, y]) for x, y in specs]) ) self.specs = specs self.extras = tuple(map(safe_extra, extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), ) self.__hash = hash(self.hashCmp) def __str__(self): extras = ','.join(self.extras) if extras: 
extras = '[%s]' % extras return '%s%s%s' % (self.project_name, extras, self.specifier) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs) == 1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args,**kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. 
Invocation by other packages is unsupported and done at their own risk. """ working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .proxy_resource import ProxyResource class SubscriptionUsage(ProxyResource): """Usage Metric of a Subscription in a Location. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource ID. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar display_name: User-readable name of the metric. :vartype display_name: str :ivar current_value: Current value of the metric. :vartype current_value: float :ivar limit: Boundary value of the metric. :vartype limit: float :ivar unit: Unit of the metric. 
:vartype unit: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'display_name': {'readonly': True}, 'current_value': {'readonly': True}, 'limit': {'readonly': True}, 'unit': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'current_value': {'key': 'properties.currentValue', 'type': 'float'}, 'limit': {'key': 'properties.limit', 'type': 'float'}, 'unit': {'key': 'properties.unit', 'type': 'str'}, } def __init__(self): super(SubscriptionUsage, self).__init__() self.display_name = None self.current_value = None self.limit = None self.unit = None import os from threading import Thread from tkinter import * from tkinter.ttk import * from pydub import AudioSegment from pydub.playback import play class Core(Tk): def get_w(self): return self._w class Application(Frame): def __init__(self, master=None, **kw): super().__init__(master, **kw) self.path = os.path.dirname(__file__) self.master.title("An's focus booster v2.0") self.master.minsize(300, 80) self.master.maxsize(600, 80) try: img = PhotoImage(file=os.path.join(self.path, 'icon.png')) self.master.tk.call('wm', 'iconphoto', self.master.get_w(), img) except TclError: img = PhotoImage(file=os.path.join(self.path, 'icon.gif')) self.master.tk.call('wm', 'iconphoto', self.master.get_w(), img) self.sec = self.itv = self.ses_num = self.break_num = self.ps_sec = self.se_sec = self.brk_sec = 0 self.minu = self.ps_min = self.ses_min = 25 self.is_ses = True self.brk_min = 5 self.bell = AudioSegment.from_mp3(os.path.join(self.path, 'bell.mp3')) self.time = self.master.after(0) self.style = Style() self.tbs = StringVar() self.stime = StringVar() self.t_ses_min = StringVar() self.t_se_sec = StringVar() self.t_brk_min = StringVar() self.t_brk_sec = StringVar() self.ses = StringVar() self.win_op = Toplevel(self.master) self.pb_time = Progressbar(self, orient=HORIZONTAL, mode='determinate', maximum=2520) self.widgets() def widgets(self): themes = self.style.theme_names() if 'xpnative' in themes: self.style.theme_use('xpnative') elif 'aqua' in themes: self.style.theme_use('aqua') elif 'alt' in themes: self.style.theme_use('alt') else: self.style.theme_use('default') self.style.configure('Horizontal.TProgressbar', background='#00f') self.stime.set('25:00') lb_time = Label(self, textvariable=self.stime) lb_time.grid(column=0, row=0, sticky='s') self.ses.set('Session {:02d}'.format(self.ses_num)) lb_ses = Label(self, textvariable=self.ses) lb_ses.grid(column=0, row=1, sticky='w') self.pb_time.grid(column=1, row=0, rowspan=2, padx=5, sticky='wnes') self.tbs.set('Start') btn_s = Button(self, textvariable=self.tbs, command=self.btn_start) btn_s.grid(column=2, row=0, pady=2, sticky='ne') btn_i = Button(self, text='Option', command=self.open_pref) btn_i.grid(column=2, row=1, pady=2, sticky='se') self.master.columnconfigure(0, weight=1) self.columnconfigure(1, weight=1) self.win_op.title('Preferences') self.win_op.resizable(False, False) self.win_op.protocol('WM_DELETE_WINDOW', self.ok_pref) lf_ses = Labelframe(self.win_op, text='Session Time:', padding=10) lf_ses.grid(column=0, row=0, columnspan=2) self.t_ses_min.set(25) sb_ses_min = Spinbox(lf_ses, from_=0, to=999, textvariable=self.t_ses_min, increment=1, state='readonly') sb_ses_min.grid(column=0, row=0, padx=5, pady=5) sb_se_sec = Spinbox(lf_ses, from_=0, to=59, 
textvariable=self.t_se_sec, increment=1, state='readonly') sb_se_sec.grid(column=0, row=1, padx=5, pady=5) lb_ses_min = Label(lf_ses, text='Minutes') lb_ses_min.grid(column=1, row=0, sticky='w') lb_se_sec = Label(lf_ses, text='Seconds') lb_se_sec.grid(column=1, row=1, sticky='w') lf_brk = Labelframe(self.win_op, text='Break Time:', padding=10) lf_brk.grid(column=0, row=1, columnspan=2) self.t_brk_min.set(5) sb_brk_min = Spinbox(lf_brk, from_=0, to=60, textvariable=self.t_brk_min, increment=1, state='readonly') sb_brk_min.grid(column=0, row=0, padx=5, pady=5) sb_brk_sec = Spinbox(lf_brk, from_=0, to=59, textvariable=self.t_brk_sec, increment=1, state='readonly') sb_brk_sec.grid(column=0, row=1, padx=5, pady=5) lb_brk_min = Label(lf_brk, text='Minutes') lb_brk_min.grid(column=1, row=0, sticky='w') lb_brk_sec = Label(lf_brk, text='Seconds') lb_brk_sec.grid(column=1, row=1, sticky='w') self.win_op.state('withdraw') def btn_start(self): if self.tbs.get() == 'Start': self.start() else: self.stop() def open_pref(self): self.win_op.state('normal') def ok_pref(self): self.ses_min = int(self.t_ses_min.get()) self.se_sec = int(self.t_se_sec.get()) self.brk_min = int(self.t_brk_min.get()) self.brk_sec = int(self.t_brk_sec.get()) self.win_op.state('withdrawn') if self.tbs.get() == 'Start': self.stime.set('{:02d}:{:02d}'.format(self.ses_min, self.se_sec)) def start(self): self.minu = self.ses_min self.sec = self.se_sec if self.minu == 0 and self.sec == 0: return self.itv = self.pb_time['maximum'] / (self.minu * 60 + self.sec) self.style.configure('Horizontal.TProgressbar', background='#00f') self.tbs.set('Stop') self.ses_num += 1 self.ses.set('Session {:02d}'.format(self.ses_num)) self.time = self.master.after(1000, self.update) def stop(self): self.pb_time['value'] = 0 self.master.after_cancel(self.time) self.tbs.set('Start') def update(self): self.sec -= 1 if self.sec < 0: self.sec = 59 self.minu -= 1 self.stime.set('{:02d}:{:02d}'.format(self.minu, self.sec)) if self.is_ses: if self.sec == 0 and self.minu == 0: self.minu = self.brk_min self.sec = self.brk_sec if self.minu == 0 and self.sec == 0: self.stop() return self.break_num += 1 if self.break_num % 4 == 0: t = (self.minu * 60 + self.sec) * 3 self.minu = t // 60 self.sec = t % 60 self.itv = self.pb_time['maximum'] / (self.minu * 60 + self.sec) self.ses.set('Break {:02d}'.format(self.break_num)) self.pb_time['value'] = self.pb_time['maximum'] self.is_ses = False self.style.configure('Horizontal.TProgressbar', background='#f0f') else: self.pb_time['value'] += self.itv if self.minu == 0 and self.sec <= 10: thread = Thread(target=play, args=(self.bell,)) thread.start() if self.style.theme_use() == 'alt': if self.pb_time['value'] / self.pb_time['maximum'] < 0.2: pass elif self.pb_time['value'] / self.pb_time['maximum'] < 0.4: self.style.configure('Horizontal.TProgressbar', background='#0ff') elif self.pb_time['value'] / self.pb_time['maximum'] < 0.6: self.style.configure('Horizontal.TProgressbar', background='#0f0') elif self.pb_time['value'] / self.pb_time['maximum'] < 0.8: self.style.configure('Horizontal.TProgressbar', background='#ff0') else: self.style.configure('Horizontal.TProgressbar', background='#f00') else: if self.sec == 0 and self.minu == 0: self.stop() self.is_ses = True return else: self.pb_time['value'] -= self.itv self.time = self.master.after(1000, self.update) if __name__ == '__main__': root = Core() app = Application(root, padding=10) app.grid(column=0, row=0, sticky='wnes') app.mainloop() # # Cython - Command Line Parsing # import 
os import sys import Options usage = """\ Cython (http://cython.org) is a compiler for code written in the Cython language. Cython is based on Pyrex by Greg Ewing. Usage: cython [options] sourcefile.{pyx,py} ... Options: -V, --version Display version number of cython compiler -l, --create-listing Write error messages to a listing file -I, --include-dir Search for include files in named directory (multiple include directories are allowed). -o, --output-file Specify name of generated C file -t, --timestamps Only compile newer source files -f, --force Compile all source files (overrides implied -t) -v, --verbose Be verbose, print file names on multiple compilation -p, --embed-positions If specified, the positions in Cython files of each function definition is embedded in its docstring. --cleanup Release interned objects on python exit, for memory debugging. Level indicates aggressiveness, default 0 releases nothing. -w, --working Sets the working directory for Cython (the directory modules are searched from) --gdb Output debug information for cygdb --gdb-outdir Specify gdb debug information output directory. Implies --gdb. -D, --no-docstrings Strip docstrings from the compiled module. -a, --annotate Produce a colorized HTML version of the source. --line-directives Produce #line directives pointing to the .pyx source --cplus Output a C++ rather than C file. --embed[=] Generate a main() function that embeds the Python interpreter. -2 Compile based on Python-2 syntax and code semantics. -3 Compile based on Python-3 syntax and code semantics. --lenient Change some compile time errors to runtime errors to improve Python compatibility --capi-reexport-cincludes Add cincluded headers to any auto-generated header files. --fast-fail Abort the compilation on the first error --warning-errors, -Werror Make all warnings into errors --warning-extra, -Wextra Enable extra warnings -X, --directive =[, 1: sys.stderr.write( "cython: Only one source file allowed when using -o\n") sys.exit(1) if len(sources) == 0 and not options.show_version: bad_usage() if Options.embed and len(sources) > 1: sys.stderr.write( "cython: Only one source file allowed when using -embed\n") sys.exit(1) return options, sources #!/usr/bin/env python """ @package ion.agents.data.test.test_external_dataset_agent_slocum @file ion/agents/data/test/test_external_dataset_agent_slocum.py @author Christopher Mueller @brief """ # Import pyon first for monkey patching. 
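# The TestExternalDatasetAgent_Slocum class below wires up the full resource
# graph (dataset agent, agent instance, data provider, data source, external
# dataset, data product and stream) through the DAMS/DPMS/PubSub service
# clients, then reuses the generic scenarios from ExternalDatasetAgentTestBase.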
from pyon.public import log, IonObject from pyon.ion.resource import PRED, RT from interface.services.dm.idataset_management_service import DatasetManagementServiceClient from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource from ion.services.dm.utility.granule_utils import time_series_domain from ion.agents.data.test.test_external_dataset_agent import ExternalDatasetAgentTestBase, IonIntegrationTestCase from nose.plugins.attrib import attr #temp until stream defs are completed from interface.services.dm.ipubsub_management_service import\ PubsubManagementServiceClient from coverage_model.parameter import ParameterDictionary, ParameterContext from coverage_model.parameter_types import QuantityType import numpy #DISABLED: attr('INT_LONG', group='eoi') # these tests rely on the original handler mechanism which had several shortcomings leading to the poller/parser rewrite class TestExternalDatasetAgent_Slocum(ExternalDatasetAgentTestBase, IonIntegrationTestCase): DVR_CONFIG = { 'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler', 'dvr_cls': 'SlocumDataHandler', } HIST_CONSTRAINTS_1 = {} HIST_CONSTRAINTS_2 = {} def _setup_resources(self): # TODO: some or all of this (or some variation) should move to DAMS' # Build the test resources for the dataset dms_cli = DatasetManagementServiceClient() dams_cli = DataAcquisitionManagementServiceClient() dpms_cli = DataProductManagementServiceClient() rr_cli = ResourceRegistryServiceClient() pubsub_cli = PubsubManagementServiceClient() eda = ExternalDatasetAgent(name='example dataset agent', handler_module=self.DVR_CONFIG['dvr_mod'], handler_class=self.DVR_CONFIG['dvr_cls']) eda_id = dams_cli.create_external_dataset_agent(eda) eda_inst = ExternalDatasetAgentInstance(name='example dataset agent instance') eda_inst_id = dams_cli.create_external_dataset_agent_instance(eda_inst, external_dataset_agent_id=eda_id) # Create and register the necessary resources/objects # Create DataProvider dprov = ExternalDataProvider(name='example data provider', institution=Institution(), contact=ContactInformation()) dprov.contact.individual_names_given = 'Christopher Mueller' dprov.contact.email = 'cmueller@asascience.com' # Create DataSource dsrc = DataSource(name='example datasource', protocol_type='FILE', institution=Institution(), contact=ContactInformation()) dsrc.connection_params['base_data_url'] = '' dsrc.contact.individual_names_given = 'Tim Giguere' dsrc.contact.email = 'tgiguere@asascience.com' # Create ExternalDataset ds_name = 'slocum_test_dataset' dset = ExternalDataset(name=ds_name, dataset_description=DatasetDescription(), update_description=UpdateDescription(), contact=ContactInformation()) dset.dataset_description.parameters['base_url'] = 'test_data/slocum/' dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat' dset.dataset_description.parameters['date_pattern'] = '%Y %j' dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat' dset.dataset_description.parameters['temporal_dimension'] = None 
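# Remaining dataset-description parameters: the temporal/spatial dimension
# names are left as None, and 'variables' enumerates the Slocum sensor columns
# used by this test.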
dset.dataset_description.parameters['zonal_dimension'] = None dset.dataset_description.parameters['meridional_dimension'] = None dset.dataset_description.parameters['vertical_dimension'] = None dset.dataset_description.parameters['variables'] = [ 'c_wpt_y_lmc', 'sci_water_cond', 'm_y_lmc', 'u_hd_fin_ap_inflection_holdoff', 'sci_m_present_time', 'm_leakdetect_voltage_forward', 'sci_bb3slo_b660_scaled', 'c_science_send_all', 'm_gps_status', 'm_water_vx', 'm_water_vy', 'c_heading', 'sci_fl3slo_chlor_units', 'u_hd_fin_ap_gain', 'm_vacuum', 'u_min_water_depth', 'm_gps_lat', 'm_veh_temp', 'f_fin_offset', 'u_hd_fin_ap_hardover_holdoff', 'c_alt_time', 'm_present_time', 'm_heading', 'sci_bb3slo_b532_scaled', 'sci_fl3slo_cdom_units', 'm_fin', 'x_cycle_overrun_in_ms', 'sci_water_pressure', 'u_hd_fin_ap_igain', 'sci_fl3slo_phyco_units', 'm_battpos', 'sci_bb3slo_b470_scaled', 'm_lat', 'm_gps_lon', 'sci_ctd41cp_timestamp', 'm_pressure', 'c_wpt_x_lmc', 'c_ballast_pumped', 'x_lmc_xy_source', 'm_lon', 'm_avg_speed', 'sci_water_temp', 'u_pitch_ap_gain', 'm_roll', 'm_tot_num_inflections', 'm_x_lmc', 'u_pitch_ap_deadband', 'm_final_water_vy', 'm_final_water_vx', 'm_water_depth', 'm_leakdetect_voltage', 'u_pitch_max_delta_battpos', 'm_coulomb_amphr', 'm_pitch', ] # Create DataSourceModel dsrc_model = DataSourceModel(name='slocum_model') # dsrc_model.model = 'SLOCUM' dsrc_model.data_handler_module = 'N/A' dsrc_model.data_handler_class = 'N/A' ## Run everything through DAMS ds_id = dams_cli.create_external_dataset(external_dataset=dset) ext_dprov_id = dams_cli.create_external_data_provider(external_data_provider=dprov) ext_dsrc_id = dams_cli.create_data_source(data_source=dsrc) ext_dsrc_model_id = dams_cli.create_data_source_model(dsrc_model) # Register the ExternalDataset dproducer_id = dams_cli.register_external_data_set(external_dataset_id=ds_id) # Or using each method dams_cli.assign_data_source_to_external_data_provider(data_source_id=ext_dsrc_id, external_data_provider_id=ext_dprov_id) dams_cli.assign_data_source_to_data_model(data_source_id=ext_dsrc_id, data_source_model_id=ext_dsrc_model_id) dams_cli.assign_external_dataset_to_data_source(external_dataset_id=ds_id, data_source_id=ext_dsrc_id) dams_cli.assign_external_dataset_to_agent_instance(external_dataset_id=ds_id, agent_instance_id=eda_inst_id) # dams_cli.assign_external_data_agent_to_agent_instance(external_data_agent_id=self.eda_id, agent_instance_id=self.eda_inst_id) #create temp streamdef so the data product can create the stream pc_list = [] for pc_k, pc in self._create_parameter_dictionary().iteritems(): pc_list.append(dms_cli.create_parameter_context(pc_k, pc[1].dump())) pdict_id = dms_cli.create_parameter_dictionary('slocum_param_dict', pc_list) streamdef_id = pubsub_cli.create_stream_definition(name="slocum_stream_def", description="stream def for slocum testing", parameter_dictionary_id=pdict_id) # dpms_cli.create_data_product() # Generate the data product and associate it to the ExternalDataset dprod = IonObject(RT.DataProduct, name='slocum_parsed_product', description='parsed slocum product') dproduct_id = dpms_cli.create_data_product(data_product=dprod, stream_definition_id=streamdef_id) dams_cli.assign_data_product(input_resource_id=ds_id, data_product_id=dproduct_id) stream_id, assn = rr_cli.find_objects(subject=dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True) stream_id = stream_id[0] log.info('Created resources: {0}'.format({'ExternalDataset': ds_id, 'ExternalDataProvider': ext_dprov_id, 'DataSource': 
ext_dsrc_id, 'DataSourceModel': ext_dsrc_model_id, 'DataProducer': dproducer_id, 'DataProduct': dproduct_id, 'Stream': stream_id})) # Create the logger for receiving publications _, stream_route, _ = self.create_stream_and_logger(name='slocum', stream_id=stream_id) self.EDA_RESOURCE_ID = ds_id self.EDA_NAME = ds_name self.DVR_CONFIG['dh_cfg'] = { 'TESTING': True, 'stream_id': stream_id, 'stream_route': stream_route, 'stream_def': streamdef_id, 'external_dataset_res': dset, 'data_producer_id': dproducer_id, # CBM: Should this be put in the main body of the config - with mod & cls? 'max_records': 20, } def _create_parameter_dictionary(self): pdict = ParameterDictionary() t_ctxt = ParameterContext('c_wpt_y_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_water_cond', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_y_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_hd_fin_ap_inflection_holdoff', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_m_present_time', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_leakdetect_voltage_forward', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_bb3slo_b660_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('c_science_send_all', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_gps_status', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_water_vx', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_water_vy', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('c_heading', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_fl3slo_chlor_units', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_hd_fin_ap_gain', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_vacuum', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_min_water_depth', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_gps_lat', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_veh_temp', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = 
ParameterContext('f_fin_offset', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_hd_fin_ap_hardover_holdoff', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('c_alt_time', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_present_time', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_heading', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_bb3slo_b532_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_fl3slo_cdom_units', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_fin', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('x_cycle_overrun_in_ms', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_water_pressure', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_hd_fin_ap_igain', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_fl3slo_phyco_units', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_battpos', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_bb3slo_b470_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_lat', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_gps_lon', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_ctd41cp_timestamp', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_pressure', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('c_wpt_x_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('c_ballast_pumped', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('x_lmc_xy_source', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_lon', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_avg_speed', 
param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('sci_water_temp', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_pitch_ap_gain', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_roll', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_tot_num_inflections', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_x_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_pitch_ap_deadband', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_final_water_vy', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_final_water_vx', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_water_depth', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_leakdetect_voltage', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('u_pitch_max_delta_battpos', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_coulomb_amphr', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) t_ctxt = ParameterContext('m_pitch', param_type=QuantityType(value_encoding=numpy.dtype('float32'))) t_ctxt.uom = 'unknown' pdict.add_context(t_ctxt) return pdict from django.contrib import messages from django.core.urlresolvers import reverse, reverse_lazy from django.http import HttpResponseRedirect from django.views.generic import ListView, CreateView, DeleteView from questionnaire.forms.questions import QuestionForm from questionnaire.models import Question, Questionnaire class QuestionList(ListView): template_name = 'questions/index.html' model = Question object_list = Question.objects.all() def get(self, *args, **kwargs): finalized_questionnaire = Questionnaire.objects.filter(status=Questionnaire.FINALIZED) active_questions = None if finalized_questionnaire.exists(): active_questions = finalized_questionnaire.latest('created').get_all_questions() context = {'request': self.request, 'questions': self.model.objects.all(), 'active_questions': active_questions} return self.render_to_response(context) class CreateQuestion(CreateView): def __init__(self, **kwargs): super(CreateQuestion, self).__init__(**kwargs) self.template_name = 'questions/new.html' self.object = Question self.model = Question self.form_class = QuestionForm self.form = None def get_context_data(self, **kwargs): context = super(CreateQuestion, self).get_context_data(**kwargs) context.update({'btn_label': 'CREATE', 'id': 'id-new-question-form'}) return context def post(self, request, *args, **kwargs): self.form = QuestionForm(data=request.POST) 
if self.form.is_valid(): return self._form_valid() return self._form_invalid() def _form_valid(self): self.form.save() messages.success(self.request, "Question successfully created.") return HttpResponseRedirect(reverse('list_questions_page')) def _form_invalid(self): messages.error(self.request, "Question NOT created. See errors below.") context = {'form': self.form, 'btn_label': "CREATE", 'id': 'id-new-question-form'} return self.render_to_response(context) class DeleteQuestion(DeleteView): model = Question def post(self, *args, **kwargs): question = self.model.objects.get(pk=kwargs['question_id']) if question.can_be_deleted(): question.delete() messages.success(self.request, "Question was deleted successfully") return HttpResponseRedirect(reverse_lazy('list_questions_page')) messages.error(self.request, "Question was not deleted because it has responses") return HttpResponseRedirect(reverse_lazy('list_questions_page')) from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p from django.contrib.gis.gdal.envelope import OGREnvelope from django.contrib.gis.gdal.libgdal import lgdal from django.contrib.gis.gdal.prototypes.errcheck import check_envelope from django.contrib.gis.gdal.prototypes.generation import ( const_string_output, double_output, geom_output, int_output, srs_output, string_output, void_output, ) # ### Generation routines specific to this module ### def env_func(f, argtypes): "For getting OGREnvelopes." f.argtypes = argtypes f.restype = None f.errcheck = check_envelope return f def pnt_func(f): "For accessing point information." return double_output(f, [c_void_p, c_int]) def topology_func(f): f.argtypes = [c_void_p, c_void_p] f.restype = c_int f.errcheck = lambda result, func, cargs: bool(result) return f # ### OGR_G ctypes function prototypes ### # GeoJSON routines. from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p]) to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii') to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii') # GetX, GetY, GetZ all return doubles. getx = pnt_func(lgdal.OGR_G_GetX) gety = pnt_func(lgdal.OGR_G_GetY) getz = pnt_func(lgdal.OGR_G_GetZ) # Geometry creation routines. from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2) from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1) create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int]) clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p]) get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int]) get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p]) geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p]) geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p]) geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p]) geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p]) geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p]) # Geometry modification routines. add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p]) import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)]) # Destroys a geometry destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False) # Geometry export routines. to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB. 
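# WKT/GML export and WKB-size prototypes; each wraps the corresponding OGR_G_*
# C function with explicit ctypes argtypes and result/error handling.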
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii') to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii') get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p]) # Geometry spatial-reference related routines. assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False) get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p]) # Geometry properties get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p]) get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p]) get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p]) get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p]) set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False) get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p]) get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii') get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p]) get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p]) get_point = void_output( lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False ) geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False) # Topology routines. ogr_contains = topology_func(lgdal.OGR_G_Contains) ogr_crosses = topology_func(lgdal.OGR_G_Crosses) ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint) ogr_equals = topology_func(lgdal.OGR_G_Equals) ogr_intersects = topology_func(lgdal.OGR_G_Intersects) ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps) ogr_touches = topology_func(lgdal.OGR_G_Touches) ogr_within = topology_func(lgdal.OGR_G_Within) # Transformation routines. geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p]) geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p]) # For retrieving the envelope of the geometry. get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)]) # Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exceptions raised by the BSON package.""" class BSONError(Exception): """Base class for all BSON exceptions. """ class InvalidBSON(BSONError): """Raised when trying to create a BSON object from invalid data. """ class InvalidStringData(BSONError): """Raised when trying to encode a string containing non-UTF8 data. """ class InvalidDocument(BSONError): """Raised when trying to create a BSON object from an invalid document. """ class InvalidId(BSONError): """Raised when trying to create an ObjectId from invalid data. """ # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility functions for the graph_editor. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re from six import iteritems from tensorflow.python.framework import ops as tf_ops from tensorflow.python.ops import array_ops as tf_array_ops __all__ = [ "make_list_of_op", "get_tensors", "make_list_of_t", "get_generating_ops", "get_consuming_ops", "ControlOutputs", "placeholder_name", "make_placeholder_from_tensor", "make_placeholder_from_dtype_and_shape", ] # The graph editor sometimes need to create placeholders, they are named # "geph_*". "geph" stands for Graph-Editor PlaceHolder. _DEFAULT_PLACEHOLDER_PREFIX = "geph" def concatenate_unique(la, lb): """Add all the elements of `lb` to `la` if they are not there already. The elements added to `la` maintain ordering with respect to `lb`. Args: la: List of Python objects. lb: List of Python objects. Returns: `la`: The list `la` with missing elements from `lb`. """ la_set = set(la) for l in lb: if l not in la_set: la.append(l) la_set.add(l) return la # TODO(fkp): very generic code, it should be moved in a more generic place. class ListView(object): """Immutable list wrapper. This class is strongly inspired by the one in tf.Operation. """ def __init__(self, list_): if not isinstance(list_, list): raise TypeError("Expected a list, got: {}.".format(type(list_))) self._list = list_ def __iter__(self): return iter(self._list) def __len__(self): return len(self._list) def __bool__(self): return bool(self._list) # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __getitem__(self, i): return self._list[i] def __add__(self, other): if not isinstance(other, list): other = list(other) return list(self) + other # TODO(fkp): very generic code, it should be moved in a more generic place. def is_iterable(obj): """Return true if the object is iterable.""" if isinstance(obj, tf_ops.Tensor): return False try: _ = iter(obj) except Exception: # pylint: disable=broad-except return False return True def flatten_tree(tree, leaves=None): """Flatten a tree into a list. Args: tree: iterable or not. If iterable, its elements (child) can also be iterable or not. leaves: list to which the tree leaves are appended (None by default). Returns: A list of all the leaves in the tree. """ if leaves is None: leaves = [] if isinstance(tree, dict): for _, child in iteritems(tree): flatten_tree(child, leaves) elif is_iterable(tree): for child in tree: flatten_tree(child, leaves) else: leaves.append(tree) return leaves def transform_tree(tree, fn, iterable_type=tuple): """Transform all the nodes of a tree. Args: tree: iterable or not. If iterable, its elements (child) can also be iterable or not. fn: function to apply to each leaves. iterable_type: type use to construct the resulting tree for unknown iterable, typically `list` or `tuple`. Returns: A tree whose leaves has been transformed by `fn`. The hierarchy of the output tree mimics the one of the input tree. 
""" if is_iterable(tree): if isinstance(tree, dict): res = tree.__new__(type(tree)) res.__init__( (k, transform_tree(child, fn)) for k, child in iteritems(tree)) return res elif isinstance(tree, tuple): # NamedTuple? if hasattr(tree, "_asdict"): res = tree.__new__(type(tree), **transform_tree(tree._asdict(), fn)) else: res = tree.__new__(type(tree), (transform_tree(child, fn) for child in tree)) return res elif isinstance(tree, collections.Sequence): res = tree.__new__(type(tree)) res.__init__(transform_tree(child, fn) for child in tree) return res else: return iterable_type(transform_tree(child, fn) for child in tree) else: return fn(tree) def check_graphs(*args): """Check that all the element in args belong to the same graph. Args: *args: a list of object with a obj.graph property. Raises: ValueError: if all the elements do not belong to the same graph. """ graph = None for i, sgv in enumerate(args): if graph is None and sgv.graph is not None: graph = sgv.graph elif sgv.graph is not None and sgv.graph is not graph: raise ValueError("Argument[{}]: Wrong graph!".format(i)) def get_unique_graph(tops, check_types=None, none_if_empty=False): """Return the unique graph used by the all the elements in tops. Args: tops: list of elements to check (usually a list of tf.Operation and/or tf.Tensor). Or a tf.Graph. check_types: check that the element in tops are of given type(s). If None, the types (tf.Operation, tf.Tensor) are used. none_if_empty: don't raise an error if tops is an empty list, just return None. Returns: The unique graph used by all the tops. Raises: TypeError: if tops is not a iterable of tf.Operation. ValueError: if the graph is not unique. """ if isinstance(tops, tf_ops.Graph): return tops if not is_iterable(tops): raise TypeError("{} is not iterable".format(type(tops))) if check_types is None: check_types = (tf_ops.Operation, tf_ops.Tensor) elif not is_iterable(check_types): check_types = (check_types,) g = None for op in tops: if not isinstance(op, check_types): raise TypeError("Expected a type in ({}), got: {}".format(", ".join([str( t) for t in check_types]), type(op))) if g is None: g = op.graph elif g is not op.graph: raise ValueError("Operation {} does not belong to given graph".format(op)) if g is None and not none_if_empty: raise ValueError("Can't find the unique graph of an empty list") return g def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False): """Convert ops to a list of `tf.Operation`. Args: ops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single operation. check_graph: if `True` check if all the operations belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ts: if True, silently ignore `tf.Tensor`. Returns: A newly created list of `tf.Operation`. Raises: TypeError: if ops cannot be converted to a list of `tf.Operation` or, if `check_graph` is `True`, if all the ops do not belong to the same graph. """ if isinstance(ops, tf_ops.Graph): if allow_graph: return ops.get_operations() else: raise TypeError("allow_graph is False: cannot convert a tf.Graph.") else: if not is_iterable(ops): ops = [ops] if not ops: return [] if check_graph: check_types = None if ignore_ts else tf_ops.Operation get_unique_graph(ops, check_types=check_types) return [op for op in ops if isinstance(op, tf_ops.Operation)] # TODO(fkp): move this function in tf.Graph? def get_tensors(graph): """get all the tensors which are input or output of an op in the graph. Args: graph: a `tf.Graph`. Returns: A list of `tf.Tensor`. 
Raises: TypeError: if graph is not a `tf.Graph`. """ if not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a graph, got: {}".format(type(graph))) ts = [] for op in graph.get_operations(): ts += op.outputs return ts def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False): """Convert ts to a list of `tf.Tensor`. Args: ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor. check_graph: if `True` check if all the tensors belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ops: if `True`, silently ignore `tf.Operation`. Returns: A newly created list of `tf.Tensor`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or, if `check_graph` is `True`, if all the ops do not belong to the same graph. """ if isinstance(ts, tf_ops.Graph): if allow_graph: return get_tensors(ts) else: raise TypeError("allow_graph is False: cannot convert a tf.Graph.") else: if not is_iterable(ts): ts = [ts] if not ts: return [] if check_graph: check_types = None if ignore_ops else tf_ops.Tensor get_unique_graph(ts, check_types=check_types) return [t for t in ts if isinstance(t, tf_ops.Tensor)] def get_generating_ops(ts): """Return all the generating ops of the tensors in `ts`. Args: ts: a list of `tf.Tensor` Returns: A list of all the generating `tf.Operation` of the tensors in `ts`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor`. """ ts = make_list_of_t(ts, allow_graph=False) return [t.op for t in ts] def get_consuming_ops(ts): """Return all the consuming ops of the tensors in ts. Args: ts: a list of `tf.Tensor` Returns: A list of all the consuming `tf.Operation` of the tensors in `ts`. Raises: TypeError: if ts cannot be converted to a list of `tf.Tensor`. """ ts = make_list_of_t(ts, allow_graph=False) ops = [] for t in ts: for op in t.consumers(): if op not in ops: ops.append(op) return ops class ControlOutputs(object): """The control outputs topology.""" def __init__(self, graph): """Create a dictionary of control-output dependencies. Args: graph: a `tf.Graph`. Returns: A dictionary where a key is a `tf.Operation` instance and the corresponding value is a list of all the ops which have the key as one of their control-input dependencies. Raises: TypeError: graph is not a `tf.Graph`. 
""" if not isinstance(graph, tf_ops.Graph): raise TypeError("Expected a tf.Graph, got: {}".format(type(graph))) self._control_outputs = {} self._graph = graph self._version = None self._build() def update(self): """Update the control outputs if the graph has changed.""" if self._version != self._graph.version: self._build() return self def _build(self): """Build the control outputs dictionary.""" self._control_outputs.clear() ops = self._graph.get_operations() for op in ops: for control_input in op.control_inputs: if control_input not in self._control_outputs: self._control_outputs[control_input] = [] if op not in self._control_outputs[control_input]: self._control_outputs[control_input].append(op) self._version = self._graph.version def get_all(self): return self._control_outputs def get(self, op): """return the control outputs of op.""" if op in self._control_outputs: return self._control_outputs[op] else: return () @property def graph(self): return self._graph def scope_finalize(scope): if scope and scope[-1] != "/": scope += "/" return scope def scope_dirname(scope): slash = scope.rfind("/") if slash == -1: return "" return scope[:slash + 1] def scope_basename(scope): slash = scope.rfind("/") if slash == -1: return scope return scope[slash + 1:] def placeholder_name(t=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX): """Create placeholder name for the graph editor. Args: t: optional tensor on which the placeholder operation's name will be based on scope: absolute scope with which to prefix the placeholder's name. None means that the scope of t is preserved. "" means the root scope. prefix: placeholder name prefix. Returns: A new placeholder name prefixed by "geph". Note that "geph" stands for Graph Editor PlaceHolder. This convention allows to quickly identify the placeholder generated by the Graph Editor. Raises: TypeError: if t is not None or a tf.Tensor. """ if scope is not None: scope = scope_finalize(scope) if t is not None: if not isinstance(t, tf_ops.Tensor): raise TypeError("Expected a tf.Tenfor, got: {}".format(type(t))) op_dirname = scope_dirname(t.op.name) op_basename = scope_basename(t.op.name) if scope is None: scope = op_dirname if op_basename.startswith("{}__".format(prefix)): ph_name = op_basename else: ph_name = "{}__{}_{}".format(prefix, op_basename, t.value_index) return scope + ph_name else: if scope is None: scope = "" return "{}{}".format(scope, prefix) def make_placeholder_from_tensor(t, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX): """Create a `tf.placeholder` for the Graph Editor. Note that the correct graph scope must be set by the calling function. Args: t: a `tf.Tensor` whose name will be used to create the placeholder (see function placeholder_name). scope: absolute scope within which to create the placeholder. None means that the scope of `t` is preserved. `""` means the root scope. prefix: placeholder name prefix. Returns: A newly created `tf.placeholder`. Raises: TypeError: if `t` is not `None` or a `tf.Tensor`. """ return tf_array_ops.placeholder( dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(t, scope=scope, prefix=prefix)) def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX): """Create a tf.placeholder for the Graph Editor. Note that the correct graph scope must be set by the calling function. The placeholder is named using the function placeholder_name (with no tensor argument). Args: dtype: the tensor type. shape: the tensor shape (optional). 
scope: absolute scope within which to create the placeholder. None means that the scope of t is preserved. "" means the root scope. prefix: placeholder name prefix. Returns: A newly created tf.placeholder. """ return tf_array_ops.placeholder( dtype=dtype, shape=shape, name=placeholder_name(scope=scope, prefix=prefix)) _INTERNAL_VARIABLE_RE = re.compile(r"^__\w+__$") def get_predefined_collection_names(): """Return all the predefined collection names.""" return [getattr(tf_ops.GraphKeys, key) for key in dir(tf_ops.GraphKeys) if not _INTERNAL_VARIABLE_RE.match(key)] def find_corresponding_elem(target, dst_graph, dst_scope="", src_scope=""): """Find corresponding op/tensor in a different graph. Args: target: A `tf.Tensor` or a `tf.Operation` belonging to the original graph. dst_graph: The graph in which the corresponding graph element must be found. dst_scope: A scope which is prepended to the name to look for. src_scope: A scope which is removed from the original of `target` name. Returns: The corresponding tf.Tensor` or a `tf.Operation`. Raises: ValueError: if `src_name` does not start with `src_scope`. TypeError: if `target` is not a `tf.Tensor` or a `tf.Operation` KeyError: If the corresponding graph element cannot be found. """ src_name = target.name if src_scope: src_scope = scope_finalize(src_scope) if not src_name.startswidth(src_scope): raise ValueError("{} does not start with {}".format(src_name, src_scope)) src_name = src_name[len(src_scope):] dst_name = src_name if dst_scope: dst_scope = scope_finalize(dst_scope) dst_name = dst_scope + dst_name if isinstance(target, tf_ops.Tensor): return dst_graph.get_tensor_by_name(dst_name) if isinstance(target, tf_ops.Operation): return dst_graph.get_operation_by_name(dst_name) raise TypeError("Expected tf.Tensor or tf.Operation, got: {}", type(target)) def find_corresponding(targets, dst_graph, dst_scope="", src_scope=""): """Find corresponding ops/tensors in a different graph. `targets` is a Python tree, that is, a nested structure of iterable (list, tupple, dictionary) whose leaves are instances of `tf.Tensor` or `tf.Operation` Args: targets: A Python tree containing `tf.Tensor` or `tf.Operation` belonging to the original graph. dst_graph: The graph in which the corresponding graph element must be found. dst_scope: A scope which is prepended to the name to look for. src_scope: A scope which is removed from the original of `top` name. Returns: A Python tree containin the corresponding tf.Tensor` or a `tf.Operation`. Raises: ValueError: if `src_name` does not start with `src_scope`. TypeError: if `top` is not a `tf.Tensor` or a `tf.Operation` KeyError: If the corresponding graph element cannot be found. """ def func(top): return find_corresponding_elem(top, dst_graph, dst_scope, src_scope) return transform_tree(targets, func) # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Represents an EC2 Spot Instance Request """ from boto.ec2.ec2object import TaggedEC2Object from boto.ec2.launchspecification import LaunchSpecification class SpotInstanceStateFault(object): """ The fault codes for the Spot Instance request, if any. :ivar code: The reason code for the Spot Instance state change. :ivar message: The message for the Spot Instance state change. """ def __init__(self, code=None, message=None): self.code = code self.message = message def __repr__(self): return '(%s, %s)' % (self.code, self.message) def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'code': self.code = value elif name == 'message': self.message = value setattr(self, name, value) class SpotInstanceStatus(object): """ Contains the status of a Spot Instance Request. :ivar code: Status code of the request. :ivar message: The description for the status code for the Spot request. :ivar update_time: Time the status was stated. """ def __init__(self, code=None, update_time=None, message=None): self.code = code self.update_time = update_time self.message = message def __repr__(self): return '' % self.code def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'code': self.code = value elif name == 'message': self.message = value elif name == 'updateTime': self.update_time = value class SpotInstanceRequest(TaggedEC2Object): """ :ivar id: The ID of the Spot Instance Request. :ivar price: The maximum hourly price for any Spot Instance launched to fulfill the request. :ivar type: The Spot Instance request type. :ivar state: The state of the Spot Instance request. :ivar fault: The fault codes for the Spot Instance request, if any. :ivar valid_from: The start date of the request. If this is a one-time request, the request becomes active at this date and time and remains active until all instances launch, the request expires, or the request is canceled. If the request is persistent, the request becomes active at this date and time and remains active until it expires or is canceled. :ivar valid_until: The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached. :ivar launch_group: The instance launch group. Launch groups are Spot Instances that launch together and terminate together. :ivar launched_availability_zone: foo :ivar product_description: The Availability Zone in which the bid is launched. :ivar availability_zone_group: The Availability Zone group. If you specify the same Availability Zone group for all Spot Instance requests, all Spot Instances are launched in the same Availability Zone. :ivar create_time: The time stamp when the Spot Instance request was created. :ivar launch_specification: Additional information for launching instances. 
:ivar instance_id: The instance ID, if an instance has been launched to fulfill the Spot Instance request. :ivar status: The status code and status message describing the Spot Instance request. """ def __init__(self, connection=None): super(SpotInstanceRequest, self).__init__(connection) self.id = None self.price = None self.type = None self.state = None self.fault = None self.valid_from = None self.valid_until = None self.launch_group = None self.launched_availability_zone = None self.product_description = None self.availability_zone_group = None self.create_time = None self.launch_specification = None self.instance_id = None self.status = None def __repr__(self): return 'SpotInstanceRequest:%s' % self.id def startElement(self, name, attrs, connection): retval = super(SpotInstanceRequest, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'launchSpecification': self.launch_specification = LaunchSpecification(connection) return self.launch_specification elif name == 'fault': self.fault = SpotInstanceStateFault() return self.fault elif name == 'status': self.status = SpotInstanceStatus() return self.status else: return None def endElement(self, name, value, connection): if name == 'spotInstanceRequestId': self.id = value elif name == 'spotPrice': self.price = float(value) elif name == 'type': self.type = value elif name == 'state': self.state = value elif name == 'validFrom': self.valid_from = value elif name == 'validUntil': self.valid_until = value elif name == 'launchGroup': self.launch_group = value elif name == 'availabilityZoneGroup': self.availability_zone_group = value elif name == 'launchedAvailabilityZone': self.launched_availability_zone = value elif name == 'instanceId': self.instance_id = value elif name == 'createTime': self.create_time = value elif name == 'productDescription': self.product_description = value else: setattr(self, name, value) def cancel(self, dry_run=False): self.connection.cancel_spot_instance_requests( [self.id], dry_run=dry_run ) import math import keras import theano import theano.tensor as T import numpy def list_assert_equal(a, b, round_to=7): ''' This will do a pairwise, rounded equality test across two lists of numbers. ''' pairs = zip(a, b) for i, j in pairs: assert round(i, round_to) == round(j, round_to) def get_standard_values(): ''' These are just a set of floats used for testing the activation functions, and are useful in multiple tests. ''' return [0,0.1,0.5,0.9,1.0] def test_softmax(): from keras.activations import softmax as s # Test using a reference implementation of softmax def softmax(values): m = max(values) values = numpy.array(values) e = numpy.exp(values - m) dist = list(e / numpy.sum(e)) return dist x = T.vector() exp = s(x) f = theano.function([x], exp) test_values=get_standard_values() result = f(test_values) expected = softmax(test_values) print(str(result)) print(str(expected)) list_assert_equal(result, expected) def test_relu(): ''' Relu implementation doesn't depend on the value being a theano variable. Testing ints, floats and theano tensors. 
''' from keras.activations import relu as r assert r(5) == 5 assert r(-5) == 0 assert r(-0.1) == 0 assert r(0.1) == 0.1 x = T.vector() exp = r(x) f = theano.function([x], exp) test_values = get_standard_values() result = f(test_values) list_assert_equal(result, test_values) # because no negatives in test values def test_tanh(): from keras.activations import tanh as t test_values = get_standard_values() x = T.vector() exp = t(x) f = theano.function([x], exp) result = f(test_values) expected = [math.tanh(v) for v in test_values] print(result) print(expected) list_assert_equal(result, expected) def test_linear(): ''' This function does no input validation, it just returns the thing that was passed in. ''' from keras.activations import linear as l xs = [1, 5, True, None, 'foo'] for x in xs: assert x == l(x) import builtins open = builtins.open # for seek() SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 r"""File-like objects that read from or write to a string buffer. This implements (nearly) all stdio methods. f = StringIO() # ready for writing f = StringIO(buf) # ready for reading f.close() # explicitly release resources held flag = f.isatty() # always false pos = f.tell() # get current position f.seek(pos) # set current position f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF buf = f.read() # read until EOF buf = f.read(n) # read up to n bytes buf = f.readline() # read until end of line ('\n') or EOF list = f.readlines()# list of f.readline() results until EOF f.truncate([size]) # truncate file at to at most size (default: current pos) f.write(buf) # write at current position f.writelines(list) # for line in list: f.write(line) f.getvalue() # return whole file's contents as a string Notes: - Using a real file is often faster (but less convenient). - There's also a much faster implementation in C, called cStringIO, but it's not subclassable. - fileno() is left unimplemented so that code which uses it triggers an exception early. - Seeking far beyond EOF and then writing will insert real null bytes that occupy space in the buffer. - There's a simple test set (see end of this file). """ try: from errno import EINVAL except ImportError: EINVAL = 22 __all__ = ["StringIO"] def _complain_ifclosed(closed): if closed: raise ValueError("I/O operation on closed file") class StringIO: """class StringIO([buffer]) When a StringIO object is created, it can be initialized to an existing string by passing the string to the constructor. If no string is given, the StringIO will start empty. The StringIO object can accept either Unicode or 8-bit strings, but mixing the two may take some care. If both are used, 8-bit strings that cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause a UnicodeError to be raised when getvalue() is called. """ def __init__(self, buf = ''): self.buf = buf self.len = len(buf) self.buflist = [] self.pos = 0 self.closed = False self.softspace = 0 def __iter__(self): return self def next(self): """A file object is its own iterator, for example iter(f) returns f (unless f is closed). When a file is used as an iterator, typically in a for loop (for example, for line in f: print line), the next() method is called repeatedly. This method returns the next input line, or raises StopIteration when EOF is hit. """ _complain_ifclosed(self.closed) r = self.readline() if not r: raise StopIteration return r def close(self): """Free the memory buffer. 
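Once closed, any further operation on the object raises ValueError ("I/O operation on closed file"); a minimal sketch of the expected behaviour: f = StringIO('data'); f.close(); f.read()  # raises ValueError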
""" if not self.closed: self.closed = True del self.buf, self.pos def isatty(self): """Returns False because StringIO objects are not connected to a tty-like device. """ _complain_ifclosed(self.closed) return False def seek(self, pos, mode = 0): """Set the file's current position. The mode argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file's end). There is no return value. """ _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] if mode == 1: pos += self.pos elif mode == 2: pos += self.len self.pos = max(0, pos) def tell(self): """Return the file's current position.""" _complain_ifclosed(self.closed) return self.pos def read(self, n = -1): """Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. """ _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] if n is None or n < 0: newpos = self.len else: newpos = min(self.pos+n, self.len) r = self.buf[self.pos:newpos] self.pos = newpos return r def readline(self, length=None): r"""Read one entire line from the file. A trailing newline character is kept in the string (but may be absent when a file ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count (including the trailing newline) and an incomplete line may be returned. An empty string is returned only when EOF is encountered immediately. Note: Unlike stdio's fgets(), the returned string contains null characters ('\0') if they occurred in the input. """ _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] i = self.buf.find('\n', self.pos) if i < 0: newpos = self.len else: newpos = i+1 if length is not None and length >= 0: if self.pos + length < newpos: newpos = self.pos + length r = self.buf[self.pos:newpos] self.pos = newpos return r def readlines(self, sizehint = 0): """Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (or more to accommodate a final whole line). """ total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def truncate(self, size=None): """Truncate the file's size. If the optional size argument is present, the file is truncated to (at most) that size. The size defaults to the current position. The current file position is not changed unless the position is beyond the new file size. If the specified size exceeds the file's current size, the file remains unchanged. """ _complain_ifclosed(self.closed) if size is None: size = self.pos elif size < 0: raise IOError(EINVAL, "Negative size not allowed") elif size < self.pos: self.pos = size self.buf = self.getvalue()[:size] self.len = size def write(self, s): """Write a string to the file. There is no return value. 
""" _complain_ifclosed(self.closed) if not s: return spos = self.pos slen = self.len if spos == slen: self.buflist.append(s) self.len = self.pos = spos + len(s) return if spos > slen: self.buflist.append('\0'*(spos - slen)) slen = spos newpos = spos + len(s) if spos < slen: if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [self.buf[:spos], s, self.buf[newpos:]] self.buf = '' if newpos > slen: slen = newpos else: self.buflist.append(s) slen = newpos self.len = slen self.pos = newpos def writelines(self, iterable): """Write a sequence of strings to the file. The sequence can be any iterable object producing strings, typically a list of strings. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) """ write = self.write for line in iterable: write(line) def flush(self): """Flush the internal buffer """ _complain_ifclosed(self.closed) def getvalue(self): """ Retrieve the entire contents of the "file" at any time before the StringIO object's close() method is called. The StringIO object can accept either Unicode or 8-bit strings, but mixing the two may take some care. If both are used, 8-bit strings that cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause a UnicodeError to be raised when getvalue() is called. """ _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] return self.buf TextIOWrapper = StringIO class RawIOBase: def read(self,n=-1): pass def readall(self): pass def readinto(self,b): pass def write(self,b): pass BufferedReader = RawIOBase from setuptools import setup from setuptools import find_packages from distutils.extension import Extension try: from Cython.Build import cythonize except ImportError: def cythonize(extensions): return extensions sources = ['rocksdb/_rocksdb.cpp'] else: sources = ['rocksdb/_rocksdb.pyx'] mod1 = Extension( 'rocksdb._rocksdb', sources, extra_compile_args=[ '-std=c++11', '-O3', '-Wall', '-Wextra', '-Wconversion', '-fno-strict-aliasing' ], language='c++', libraries=[ 'rocksdb', 'snappy', 'bz2', 'z' ] ) setup( name="pyrocksdb", version='0.5', description="Python bindings for RocksDB", keywords='rocksdb', author='Stephan Hofmockel', author_email="Use the github issues", url="https://github.com/stephan-hof/pyrocksdb", license='BSD License', install_requires=['setuptools'], package_dir={'rocksdb': 'rocksdb'}, packages=find_packages('.'), ext_modules=cythonize([mod1]), test_suite='rocksdb.tests', include_package_data=True ) #!/usr/bin/python # urllib2 with kerberos proof of concept # Copyright 2008 Lime Nest LLC # Copyright 2008 Lime Spot LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re import logging import sys import urllib2 as u2 import kerberos as k LOG = logging.getLogger("http_kerberos_auth_handler") class AbstractKerberosAuthHandler: """auth handler for urllib2 that does Kerberos HTTP Negotiate Authentication """ def negotiate_value(self, headers): """checks for "Negotiate" in proper auth header """ authreq = headers.get(self.auth_header, None) if authreq: rx = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I) mo = rx.search(authreq) if mo: return mo.group(1) else: LOG.debug("regex failed on: %s" % authreq) else: LOG.debug("%s header not found" % self.auth_header) return None def __init__(self): self.retried = 0 self.context = None def generate_request_header(self, req, headers, neg_value): self.retried += 1 LOG.debug("retry count: %d" % self.retried) host = req.get_host() LOG.debug("req.get_host() returned %s" % host) # We need Python 2.4 compatibility #tail, sep, head = host.rpartition(':') #domain = tail or head host_parts = host.rsplit(':', 1) domain = host_parts[0] result, self.context = k.authGSSClientInit("HTTP@%s" % domain) if result < 1: LOG.warning("authGSSClientInit returned result %d" % result) return None LOG.debug("authGSSClientInit() succeeded") result = k.authGSSClientStep(self.context, neg_value) if result < 0: LOG.warning("authGSSClientStep returned result %d" % result) return None LOG.debug("authGSSClientStep() succeeded") response = k.authGSSClientResponse(self.context) LOG.debug("authGSSClientResponse() succeeded") return "Negotiate %s" % response def authenticate_server(self, headers): neg_value = self.negotiate_value(headers) if neg_value is None: LOG.critical("mutual auth failed. No negotiate header") return None result = k.authGSSClientStep(self.context, neg_value) if result < 1: # this is a critical security warning # should change to a raise --Tim LOG.critical("mutual auth failed: authGSSClientStep returned result %d" % result) pass def clean_context(self): if self.context is not None: LOG.debug("cleaning context") k.authGSSClientClean(self.context) self.context = None def http_error_auth_reqed(self, host, req, headers): neg_value = self.negotiate_value(headers) #Check for auth_header if neg_value is not None: if not self.retried > 0: return self.retry_http_kerberos_auth(req, headers, neg_value) else: return None else: self.retried = 0 def retry_http_kerberos_auth(self, req, headers, neg_value): try: try: neg_hdr = self.generate_request_header(req, headers, neg_value) if neg_hdr is None: LOG.debug("neg_hdr was None") return None req.add_unredirected_header(self.authz_header, neg_hdr) resp = self.parent.open(req) self.authenticate_server(resp.info()) return resp except k.GSSError, e: LOG.critical("GSSAPI Error: %s/%s" % (e[0][0], e[1][0])) return None finally: self.clean_context() self.retried = 0 class ProxyKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler): """Kerberos Negotiation handler for HTTP proxy auth """ authz_header = 'Proxy-Authorization' auth_header = 'proxy-authenticate' handler_order = 480 # before Digest auth def http_error_407(self, req, fp, code, msg, headers): LOG.debug("inside http_error_407") host = req.get_host() retry = self.http_error_auth_reqed(host, req, headers) self.retried = 0 return retry class HTTPKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler): """Kerberos Negotiation handler for HTTP auth """ authz_header = 'Authorization' auth_header = 'www-authenticate' handler_order = 480 # before Digest auth def http_error_401(self, req, fp, code, msg, headers): 
LOG.debug("inside http_error_401") host = req.get_host() retry = self.http_error_auth_reqed(host, req, headers) self.retried = 0 return retry def test(): LOG.setLevel(logging.DEBUG) LOG.info("starting test") opener = u2.build_opener() opener.add_handler(HTTPKerberosAuthHandler()) resp = opener.open(sys.argv[1]) print dir(resp), resp.info(), resp.code if __name__ == '__main__': test() #!/usr/bin/python # # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # import sys import time import argparse import ConfigParser from vnc_api.vnc_api import * from cfgm_common.exceptions import * class DatabaseNodeProvisioner(object): def __init__(self, args_str=None): self._args = None if not args_str: args_str = ' '.join(sys.argv[1:]) self._parse_args(args_str) connected = False tries = 0 while not connected: try: self._vnc_lib = VncApi( self._args.admin_user, self._args.admin_password, self._args.admin_tenant_name, self._args.api_server_ip, self._args.api_server_port, '/', auth_host=self._args.openstack_ip) connected = True except ResourceExhaustionError: # haproxy throws 503 if tries < 10: tries += 1 time.sleep(3) else: raise gsc_obj = self._vnc_lib.global_system_config_read( fq_name=['default-global-system-config']) self._global_system_config_obj = gsc_obj if self._args.oper == 'add': self.add_database_node() elif self._args.oper == 'del': self.del_database_node() else: print "Unknown operation %s. Only 'add' and 'del' supported"\ % (self._args.oper) # end __init__ def _parse_args(self, args_str): ''' Eg. python provision_database_node.py --host_name a3s30.contrail.juniper.net --host_ip 10.1.1.1 --api_server_ip 127.0.0.1 --api_server_port 8082 --oper ''' # Source any specified config/ini file # Turn off help, so we print all options in response to -h conf_parser = argparse.ArgumentParser(add_help=False) conf_parser.add_argument("-c", "--conf_file", help="Specify config file", metavar="FILE") args, remaining_argv = conf_parser.parse_known_args(args_str.split()) defaults = { 'api_server_ip': '127.0.0.1', 'api_server_port': '8082', 'oper': 'add', } ksopts = { 'admin_user': 'user1', 'admin_password': 'password1', 'admin_tenant_name': 'default-domain' } if args.conf_file: config = ConfigParser.SafeConfigParser() config.read([args.conf_file]) defaults.update(dict(config.items("DEFAULTS"))) if 'KEYSTONE' in config.sections(): ksopts.update(dict(config.items("KEYSTONE"))) # Override with CLI options # Don't surpress add_help here so it will handle -h parser = argparse.ArgumentParser( # Inherit options from config_parser parents=[conf_parser], # print script description with -h/--help description=__doc__, # Don't mess with format of description formatter_class=argparse.RawDescriptionHelpFormatter, ) defaults.update(ksopts) parser.set_defaults(**defaults) parser.add_argument( "--host_name", help="hostname name of database node", required=True) parser.add_argument("--host_ip", help="IP address of database node", required=True) parser.add_argument( "--api_server_ip", help="IP address of api server", required=True) parser.add_argument("--api_server_port", help="Port of api server") parser.add_argument( "--oper", default='add', help="Provision operation to be done(add or del)") parser.add_argument( "--admin_user", help="Name of keystone admin user") parser.add_argument( "--admin_password", help="Password of keystone admin user") parser.add_argument( "--admin_tenant_name", help="Tenamt name for keystone admin user") parser.add_argument( "--openstack_ip", help="IP address of openstack node") self._args = 
parser.parse_args(remaining_argv) # end _parse_args def add_database_node(self): gsc_obj = self._global_system_config_obj database_node_obj = DatabaseNode( self._args.host_name, gsc_obj, database_node_ip_address=self._args.host_ip) database_node_exists = True try: database_node_obj = self._vnc_lib.database_node_read( fq_name=database_node_obj.get_fq_name()) except NoIdError: database_node_exists = False if database_node_exists: self._vnc_lib.database_node_update(database_node_obj) else: self._vnc_lib.database_node_create(database_node_obj) # end add_database_node def del_database_node(self): gsc_obj = self._global_system_config_obj database_node_obj = DatabaseNode(self._args.host_name, gsc_obj) self._vnc_lib.database_node_delete( fq_name=database_node_obj.get_fq_name()) # end del_database_node # end class DatabaseNodeProvisioner def main(args_str=None): DatabaseNodeProvisioner(args_str) # end main if __name__ == "__main__": main() # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import webnotes import json def execute(): doctypes_child_tables_map = {} # Get all saved report columns columns = webnotes.conn.sql("""select defvalue, defkey from `tabDefaultValue` where defkey like '_list_settings:%'""") # Make map of doctype and child tables for value, key in columns: doctype = key.split(':')[-1] child_tables = webnotes.conn.sql_list("""select options from `tabDocField` where parent=%s and fieldtype='Table'""", doctype) doctypes_child_tables_map.setdefault(doctype, child_tables + [doctype]) # If defvalue contains child doctypes then only append the column for value, key in columns: new_columns = [] column_doctype = key.split(':')[-1] for field, field_doctype in json.loads(value): if field_doctype in doctypes_child_tables_map.get(column_doctype): new_columns.append([field, field_doctype]) if new_columns: webnotes.conn.sql("""update `tabDefaultValue` set defvalue=%s where defkey=%s""" % ('%s', '%s'), (json.dumps(new_columns), key)) else: webnotes.conn.sql("""delete from `tabDefaultValue` where defkey=%s""", key) # [The "BSD license"] # Copyright (c) 2013 Terence Parr # Copyright (c) 2013 Sam Harwell # Copyright (c) 2014 Eric Vergnaud # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #/ # A rule context is a record of a single rule invocation. It knows # which context invoked it, if any. If there is no parent context, then # naturally the invoking state is not valid. The parent link # provides a chain upwards from the current rule invocation to the root # of the invocation tree, forming a stack. We actually carry no # information about the rule associated with this context (except # when parsing). We keep only the state number of the invoking state from # the ATN submachine that invoked this. Contrast this with the s # pointer inside ParserRuleContext that tracks the current state # being "executed" for the current rule. # # The parent contexts are useful for computing lookahead sets and # getting error information. # # These objects are used during parsing and prediction. # For the special case of parsers, we use the subclass # ParserRuleContext. # # @see ParserRuleContext #/ from io import StringIO from antlr4.tree.Tree import RuleNode, INVALID_INTERVAL from antlr4.tree.Trees import Trees class RuleContext(RuleNode): EMPTY = None def __init__(self, parent=None, invokingState=-1): super(RuleContext, self).__init__() # What context invoked this rule? self.parentCtx = parent # What state invoked the rule associated with this context? # The "return address" is the followState of invokingState # If parent is null, this should be -1. self.invokingState = invokingState def depth(self): n = 0 p = self while p is not None: p = p.parentCtx n += 1 return n # A context is empty if there is no invoking state; meaning nobody call # current context. def isEmpty(self): return self.invokingState == -1 # satisfy the ParseTree / SyntaxTree interface def getSourceInterval(self): return INVALID_INTERVAL def getRuleContext(self): return self def getPayload(self): return self # Return the combined text of all child nodes. This method only considers # tokens which have been added to the parse tree. #

# Since tokens on hidden channels (e.g. whitespace or comments) are not # added to the parse trees, they will not appear in the output of this # method. #/ def getText(self): if self.getChildCount() == 0: return u"" with StringIO() as builder: for child in self.getChildren(): builder.write(child.getText()) return builder.getvalue() def getRuleIndex(self): return -1 def getChild(self, i): return None def getChildCount(self): return 0 def getChildren(self): for c in []: yield c def accept(self, visitor): return visitor.visitChildren(self) # # Call this method to view a parse tree in a dialog box visually.#/ # public Future inspect(@Nullable Parser parser) { # List ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null; # return inspect(ruleNames); # } # # public Future inspect(@Nullable List ruleNames) { # TreeViewer viewer = new TreeViewer(ruleNames, this); # return viewer.open(); # } # # # Save this tree in a postscript file#/ # public void save(@Nullable Parser parser, String fileName) # throws IOException, PrintException # { # List ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null; # save(ruleNames, fileName); # } # # # Save this tree in a postscript file using a particular font name and size#/ # public void save(@Nullable Parser parser, String fileName, # String fontName, int fontSize) # throws IOException # { # List ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null; # save(ruleNames, fileName, fontName, fontSize); # } # # # Save this tree in a postscript file#/ # public void save(@Nullable List ruleNames, String fileName) # throws IOException, PrintException # { # Trees.writePS(this, ruleNames, fileName); # } # # # Save this tree in a postscript file using a particular font name and size#/ # public void save(@Nullable List ruleNames, String fileName, # String fontName, int fontSize) # throws IOException # { # Trees.writePS(this, ruleNames, fileName, fontName, fontSize); # } # # # Print out a whole tree, not just a node, in LISP format # # (root child1 .. childN). Print just a node if this is a leaf. # # We have to know the recognizer so we can get rule names. # #/ # @Override # public String toStringTree(@Nullable Parser recog) { # return Trees.toStringTree(this, recog); # } # # Print out a whole tree, not just a node, in LISP format # (root child1 .. childN). Print just a node if this is a leaf. # def toStringTree(self, ruleNames=None, recog=None): return Trees.toStringTree(self, ruleNames=ruleNames, recog=recog) # } # # @Override # public String toStringTree() { # return toStringTree((List)null); # } # def __unicode__(self): return self.toString(None, None) # @Override # public String toString() { # return toString((List)null, (RuleContext)null); # } # # public final String toString(@Nullable Recognizer recog) { # return toString(recog, ParserRuleContext.EMPTY); # } # # public final String toString(@Nullable List ruleNames) { # return toString(ruleNames, null); # } # # // recog null unless ParserRuleContext, in which case we use subclass toString(...) # public String toString(@Nullable Recognizer recog, @Nullable RuleContext stop) { # String[] ruleNames = recog != null ? recog.getRuleNames() : null; # List ruleNamesList = ruleNames != null ? 
Arrays.asList(ruleNames) : null; # return toString(ruleNamesList, stop); # } def toString(self, ruleNames, stop): with StringIO() as buf: p = self buf.write(u"[") while p is not None and p is not stop: if ruleNames is None: if not p.isEmpty(): buf.write(unicode(p.invokingState)) else: ri = p.getRuleIndex() ruleName = ruleNames[ri] if ri >= 0 and ri < len(ruleNames) else unicode(ri) buf.write(ruleName) if p.parentCtx is not None and (ruleNames is not None or not p.parentCtx.isEmpty()): buf.write(u" ") p = p.parentCtx buf.write(u"]") return buf.getvalue() # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function from future.builtins import int, object import re import six import unicodedata import random from abc import ABCMeta from bitstring import BitArray class Component(object): """Base class for all ISCC identifier components""" __metaclass__ = ABCMeta #: Base35 custom symbol table for conversion between `ident` and `code` SYMBOLS = u"H9ITDKR83F4SV12PAXWBYG57JQ6OCNMLUEZ" #: Base35 BASE = len(SYMBOLS) #: Regex for `code` validation STR_PATT = re.compile("^([A-Z1-9]*)$", re.UNICODE) #: Min value of internal `ident` INT_MIN = 0 #: Min length of `code` representation STR_MIN = 1 @property def INT_MAX(self): """Max value of internal `ident` (2**BIT_LENGTH - 1)""" return 2 ** self.BIT_LENGTH - 1 @property def STR_MAX(self): return len(self.encode(self.INT_MAX)) def __init__(self, ident=None, code=None, normalize=True, validate=True, bits=64): """ :param int ident: Identifier integer value. :param str or unicode code: Identifier string representation for display :param bool normalize: Normalize `ident` and `code` before processing :param bool validate: Validate the identifier at instantiation. :param int bits: Number of bits of identifier component. """ self.BIT_LENGTH = bits # Case: normalization if normalize and code is not None: code = self.normalize_code(code) if normalize and ident is not None: ident = self.normalize_ident(ident) # Case: create random identifier if ident is None and code is None: ident = self.random_ident() # Case: only `ident` passed in if ident is not None and code is None: code = self.encode(ident) # Case: only `code` passed in if code is not None and ident is None: ident = self.decode(code) self._int = ident self._str = code # Case: validation if validate: self.validate() @property def ident(self): """Internal integer value of identifier""" return self._int @property def code(self): """External string representation of identifier""" return self._str @property def bitstring(self): """String representation of bit-sequence""" return BitArray(uint=self.ident, length=self.BIT_LENGTH).bin @classmethod def normalize_code(cls, code): return unicodedata.normalize('NFKC', code).strip().upper() @staticmethod def normalize_ident(ident): return int(ident) @classmethod def random_ident(cls, bits=64): """Create a random identifier.
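:param int bits: Bit length of the random identifier (defaults to 64).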
:return int: Random identifier """ rand_crypt = random.SystemRandom() rand_id = rand_crypt.randint(cls.INT_MIN, 2 ** bits - 1) return rand_id def __int__(self): return self._int def __str__(self): return self._str def __repr__(self): return '{}({})'.format(self.__class__.__name__, self._int) def __eq__(self, other): """Identifiers are identical if their `ident`s are equal""" return self.ident == other.ident def __hash__(self): """Override for set uniqueness.""" return self.ident @classmethod def encode(cls, ident): """ :param int ident: Integer value of identifier :return str: String representation of identifier """ code = '' while ident > 0 or not code: ident, i = divmod(ident, cls.BASE) code += cls.SYMBOLS[i] return code @classmethod def decode(cls, code): """ :param str code: String representation of identifier :return int: Integer value of identifier """ ident = 0 for i, digit in enumerate(code): ident += cls.SYMBOLS.index(digit) * (cls.BASE ** i) return ident def hamming_distance(self, other): x = (self.ident ^ other.ident) & ((1 << self.BIT_LENGTH) - 1) tot = 0 while x: tot += 1 x &= x - 1 return tot def jaccard_similarity(self, other): """Bitwise Jaccard coefficient of this identifier and `other`""" same_bits = [(bit == other.bitstring[i]) for i, bit in enumerate(self.bitstring)].count(True) return same_bits / (2 * len(self.bitstring) - same_bits) def is_valid(self): """ :return bool: True or False """ return all(( self._int_valid(self._int), self._str_valid(self._str), self.encode(self._int) == self._str, self.decode(self._str) == self._int, )) def validate(self): """ :raises ValueError: Raises ValueError with help text if invalid :return bool: Returns True if valid or raises ValueError """ self._validate_int(self._int) self._validate_str(self._str) self._validate_match(self._str, self._int) return True def _validate_str(self, s): if not isinstance(s, six.text_type): raise ValueError( u's must be {} not {}'.format(six.text_type, type(s)) ) if not self._str_valid_chars(s): raise ValueError( u'text value `{}` must only contain [1-9][A-Z]'.format(s) ) if not self._str_valid_len(s): raise ValueError(u'text value `{}` must be {} to {} chars'.format( s, self.STR_MIN, self.STR_MAX )) def _validate_int(self, n): if not self._int_valid(n): raise ValueError(u'number value `{}` not between {} and {}'.format( n, self.INT_MIN, self.INT_MAX )) def _validate_match(self, s, n): if not self._is_match(s, n): raise ValueError( u'text/number representations don\'t match: {}!={}'.format( self.encode(n), s ) ) def _int_valid(self, n): return self.INT_MIN <= n <= self.INT_MAX def _str_valid(self, s): return self._str_valid_chars(s) and self._str_valid_len(s) def _str_valid_chars(self, s): return bool(self.STR_PATT.match(s)) def _str_valid_len(self, s): return self.STR_MIN <= len(s) <= self.STR_MAX def _is_match(self, s, n): return self.encode(n) == s from classtime.logging import logging logging = logging.getLogger(__name__) #pylint: disable=C0103 from classtime.core import db from classtime.models import Term, Schedule, Course, Section class StandardLocalDatabase(object): """A single institution's view of the local database Uses a stack-based accessor idiom. Usage: self.push_() ... use self.cur_datatype_model() ...
self.pop_() """ def __init__(self, institution): self._institution = institution self._model_stack = list() self.Term = Term self.Schedule = Schedule self.Course = Course self.Section = Section def create(self): """Create the database, if it did not already exist """ db.create_all() def push_datatype(self, datatype): datatype = datatype.lower() if 'term' in datatype: self.push_terms() elif 'schedule' in datatype: self.push_schedules() elif 'course' in datatype: self.push_courses() elif 'section' in datatype: self.push_sections() else: logging.error('Cannot find datatype <{}>'.format(datatype)) return self def push_terms(self): """Filter all requests to Term objects only. Returns self, so this method should be chained with other methods. :returns: self :rtype: StandardLocalDatabase """ self._model_stack.append(Term) return self def push_schedules(self): """Filter all requests to Schedule objects only. Returns self, so this method should be chained with other methods. :returns: self :rtype: StandardLocalDatabase """ self._model_stack.append(Schedule) return self def push_courses(self): """Filter all requests to Course objects only. Returns self, so this method should be chained with other methods. :returns: self :rtype: StandardLocalDatabase """ self._model_stack.append(Course) return self def push_sections(self): """Filter all requests to Section objects only. Should be the first call in every chained call to the StandardLocalDatabase. :returns: self :rtype: StandardLocalDatabase """ self._model_stack.append(Section) return self def pop_datatype(self): self._model_stack.pop() return self def cur_datatype_model(self): return self._model_stack[-1] def exists(self, datatype, identifiers=None, **kwargs): """Checks whether an object exists with the given identifiers (primary key values). If no identifiers are given, checks if *any* object exists. Primary keys are specified in each models/*.py definition. Institution must be omitted. It will be inferred from the institution of this local database instance. :returns: whether the object(s) exist(s) :rtype: boolean """ if kwargs: retval = self.query(datatype) \ .filter_by(**kwargs) \ .first() is not None elif identifiers is None: retval = self.query(datatype) \ .first() is not None else: retval = self.get(datatype, identifiers) is not None return retval def get(self, datatype, identifiers): self.push_datatype(datatype) identifiers = (self._institution,) + identifiers retval = self.cur_datatype_model().query.get(identifiers) self.pop_datatype() return retval def query(self, datatype): self.push_datatype(datatype) retval = self.cur_datatype_model() \ .query \ .filter_by(institution=self._institution) self.pop_datatype() return retval def add(self, model_dict, datatype): """Adds an 'add command' to the running transaction which will add a new model with attributes specified by dict 'model_dict' :param dict model_dict: dictionary of attributes to store in the object. """ self.push_datatype(datatype) model_dict['institution'] = self._institution db.session.add(self.cur_datatype_model()(model_dict)) self.pop_datatype() def update(self, model_dict, datatype, identifiers): db_obj = self.get(datatype=datatype, identifiers=identifiers) for attr, value in model_dict.iteritems(): setattr(db_obj, attr, value) def commit(self): """Commits the running transaction to the database If the commit fails, it will be rolled back to a safe state. """ try: db.session.commit() except: db.session.rollback() raise # -*- coding: utf-8 -*- # Part of Odoo.
See LICENSE file for full copyright and licensing details. from ast import literal_eval import cPickle from openerp import models, fields, api class pos_cache(models.Model): _name = 'pos.cache' cache = fields.Binary() product_domain = fields.Text(required=True) product_fields = fields.Text(required=True) config_id = fields.Many2one('pos.config', ondelete='cascade', required=True) compute_user_id = fields.Many2one('res.users', 'Cache compute user', required=True) @api.model def refresh_all_caches(self): self.env['pos.cache'].search([]).refresh_cache() @api.one def refresh_cache(self): products = self.env['product.product'].search(self.get_product_domain()) prod_ctx = products.with_context(pricelist=self.config_id.pricelist_id.id, display_default_code=False) prod_ctx = prod_ctx.sudo(self.compute_user_id.id) res = prod_ctx.read(self.get_product_fields()) datas = { 'cache': cPickle.dumps(res, protocol=cPickle.HIGHEST_PROTOCOL), } self.write(datas) @api.model def get_product_domain(self): return literal_eval(self.product_domain) @api.model def get_product_fields(self): return literal_eval(self.product_fields) @api.model def get_cache(self, domain, fields): if not self.cache or domain != self.get_product_domain() or fields != self.get_product_fields(): self.product_domain = str(domain) self.product_fields = str(fields) self.refresh_cache() return cPickle.loads(self.cache) class pos_config(models.Model): _inherit = 'pos.config' @api.one @api.depends('cache_ids') def _get_oldest_cache_time(self): pos_cache = self.env['pos.cache'] oldest_cache = pos_cache.search([('config_id', '=', self.id)], order='write_date', limit=1) if oldest_cache: self.oldest_cache_time = oldest_cache.write_date # Use a related model to avoid the load of the cache when the pos load his config cache_ids = fields.One2many('pos.cache', 'config_id') oldest_cache_time = fields.Datetime(compute='_get_oldest_cache_time', string='Oldest cache time', readonly=True) def _get_cache_for_user(self): pos_cache = self.env['pos.cache'] cache_for_user = pos_cache.search([('id', 'in', self.cache_ids.ids), ('compute_user_id', '=', self.env.uid)]) if cache_for_user: return cache_for_user[0] else: return None @api.multi def get_products_from_cache(self, fields, domain): cache_for_user = self._get_cache_for_user() if cache_for_user: return cache_for_user.get_cache(domain, fields) else: pos_cache = self.env['pos.cache'] pos_cache.create({ 'config_id': self.id, 'product_domain': str(domain), 'product_fields': str(fields), 'compute_user_id': self.env.uid }) new_cache = self._get_cache_for_user() return new_cache.get_cache(domain, fields) @api.one def delete_cache(self): # throw away the old caches self.cache_ids.unlink() """distutils.command.build_ext Implements the Distutils 'build_ext' command, for building extension modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" # This module should be kept compatible with Python 2.1. __revision__ = "$Id$" import sys, os, string, re from types import * from site import USER_BASE, USER_SITE from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version from distutils.dep_util import newer_group from distutils.extension import Extension from distutils.util import get_platform from distutils import log if os.name == 'nt': from distutils.msvccompiler import get_build_version MSVC_VERSION = int(get_build_version()) # An extension name is just a dot-separated list of Python NAMEs (ie. 
# the same as a fully-qualified module name). extension_name_re = re.compile \ (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$') def show_compilers (): from distutils.ccompiler import show_compilers show_compilers() class build_ext (Command): description = "build C/C++ extensions (compile/link to build directory)" # XXX thoughts on how to deal with complex command-line options like # these, i.e. how to make it so fancy_getopt can suck them off the # command line and make it look like setup.py defined the appropriate # lists of tuples of what-have-you. # - each command needs a callback to process its command-line options # - Command.__init__() needs access to its share of the whole # command line (must ultimately come from # Distribution.parse_command_line()) # - it then calls the current command class' option-parsing # callback to deal with weird options like -D, which have to # parse the option text and churn out some custom data # structure # - that data structure (in this case, a list of 2-tuples) # will then be present in the command object by the time # we get to finalize_options() (i.e. the constructor # takes care of both command-line and client options # in between initialize_options() and finalize_options()) sep_by = " (separated by '%s')" % os.pathsep user_options = [ ('build-lib=', 'b', "directory for compiled extension modules"), ('build-temp=', 't', "directory for temporary files (build by-products)"), ('plat-name=', 'p', "platform name to cross-compile for, if supported " "(default: %s)" % get_platform()), ('inplace', 'i', "ignore build-lib and put compiled extensions into the source " + "directory alongside your pure Python modules"), ('include-dirs=', 'I', "list of directories to search for header files" + sep_by), ('define=', 'D', "C preprocessor macros to define"), ('undef=', 'U', "C preprocessor macros to undefine"), ('libraries=', 'l', "external C libraries to link with"), ('library-dirs=', 'L', "directories to search for external C libraries" + sep_by), ('rpath=', 'R', "directories to search for shared C libraries at runtime"), ('link-objects=', 'O', "extra explicit link objects to include in the link"), ('debug', 'g', "compile/link with debugging information"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('compiler=', 'c', "specify the compiler type"), ('swig-cpp', None, "make SWIG create C++ files (default is C)"), ('swig-opts=', None, "list of SWIG command line options"), ('swig=', None, "path to the SWIG executable"), ('user', None, "add user include, library and rpath"), ] boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] help_options = [ ('help-compiler', None, "list available compilers", show_compilers), ] def initialize_options (self): self.extensions = None self.build_lib = None self.plat_name = None self.build_temp = None self.inplace = 0 self.package = None self.include_dirs = None self.define = None self.undef = None self.libraries = None self.library_dirs = None self.rpath = None self.link_objects = None self.debug = None self.force = None self.compiler = None self.swig = None self.swig_cpp = None self.swig_opts = None self.user = None def finalize_options(self): from distutils import sysconfig self.set_undefined_options('build', ('build_lib', 'build_lib'), ('build_temp', 'build_temp'), ('compiler', 'compiler'), ('debug', 'debug'), ('force', 'force'), ('plat_name', 'plat_name'), ) if self.package is None: self.package = self.distribution.ext_package self.extensions = self.distribution.ext_modules # Make 
sure Python's include directories (for Python.h, pyconfig.h, # etc.) are in the include search path. py_include = sysconfig.get_python_inc() plat_py_include = sysconfig.get_python_inc(plat_specific=1) if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) # Put the Python "system" include dir at the end, so that # any local include dirs take precedence. self.include_dirs.append(py_include) if plat_py_include != py_include: self.include_dirs.append(plat_py_include) if isinstance(self.libraries, str): self.libraries = [self.libraries] # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset if self.libraries is None: self.libraries = [] if self.library_dirs is None: self.library_dirs = [] elif type(self.library_dirs) is StringType: self.library_dirs = string.split(self.library_dirs, os.pathsep) if self.rpath is None: self.rpath = [] elif type(self.rpath) is StringType: self.rpath = string.split(self.rpath, os.pathsep) # for extensions under windows use different directories # for Release and Debug builds. # also Python's library directory must be appended to library_dirs if os.name == 'nt': # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: self.build_temp = os.path.join(self.build_temp, "Release") # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) if MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' else: # win-amd64 or win-ia64 suffix = self.plat_name[4:] new_lib = os.path.join(sys.exec_prefix, 'PCbuild') if suffix: new_lib = os.path.join(new_lib, suffix) self.library_dirs.append(new_lib) elif MSVC_VERSION == 8: self.library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS8.0')) elif MSVC_VERSION == 7: self.library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VS7.1')) else: self.library_dirs.append(os.path.join(sys.exec_prefix, 'PC', 'VC6')) # OS/2 (EMX) doesn't support Debug vs Release builds, but has the # import libraries in its "Config" subdirectory if os.name == 'os2': self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config')) # for extensions under Cygwin and AtheOS Python's library directory must be # appended to library_dirs if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos': if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(os.path.join(sys.prefix, "lib", "python" + get_python_version(), "config")) else: # building python standard extensions self.library_dirs.append('.') # for extensions under Linux or Solaris with a shared Python library, # Python's library directory must be appended to library_dirs sysconfig.get_config_var('Py_ENABLE_SHARED') if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu') or sys.platform.startswith('sunos')) and sysconfig.get_config_var('Py_ENABLE_SHARED')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions 
self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) else: # building python standard extensions self.library_dirs.append('.') # The argument parsing will result in self.define being a string, but # it has to be a list of 2-tuples. All the preprocessor symbols # specified by the 'define' option will be set to '1'. Multiple # symbols can be separated with commas. if self.define: defines = self.define.split(',') self.define = map(lambda symbol: (symbol, '1'), defines) # The option for macros to undefine is also a string from the # option parsing, but has to be a list. Multiple symbols can also # be separated with commas here. if self.undef: self.undef = self.undef.split(',') if self.swig_opts is None: self.swig_opts = [] else: self.swig_opts = self.swig_opts.split(' ') # Finally add the user include and library directories if requested if self.user: user_include = os.path.join(USER_BASE, "include") user_lib = os.path.join(USER_BASE, "lib") if os.path.isdir(user_include): self.include_dirs.append(user_include) if os.path.isdir(user_lib): self.library_dirs.append(user_lib) self.rpath.append(user_lib) def run(self): from distutils.ccompiler import new_compiler # 'self.extensions', as supplied by setup.py, is a list of # Extension instances. See the documentation for Extension (in # distutils.extension) for details. # # For backwards compatibility with Distutils 0.8.2 and earlier, we # also allow the 'extensions' list to be a list of tuples: # (ext_name, build_info) # where build_info is a dictionary containing everything that # Extension instances do except the name, with a few things being # differently named. We convert these 2-tuples to Extension # instances as needed. if not self.extensions: return # If we were asked to build any C/C++ libraries, make sure that the # directory where we put them is in the library search path for # linking extensions. if self.distribution.has_c_libraries(): build_clib = self.get_finalized_command('build_clib') self.libraries.extend(build_clib.get_library_names() or []) self.library_dirs.append(build_clib.build_clib) # Setup the CCompiler object that we'll use to do all the # compiling and linking self.compiler = new_compiler(compiler=self.compiler, verbose=self.verbose, dry_run=self.dry_run, force=self.force) customize_compiler(self.compiler) # If we are cross-compiling, init the compiler now (if we are not # cross-compiling, init would not hurt, but people may rely on # late initialization of compiler even if they shouldn't...) if os.name == 'nt' and self.plat_name != get_platform(): self.compiler.initialize(self.plat_name) # And make sure that any compile/link-related options (which might # come from the command-line or from the setup script) are set in # that CCompiler object -- that way, they automatically apply to # all compiling and linking done here. if self.include_dirs is not None: self.compiler.set_include_dirs(self.include_dirs) if self.define is not None: # 'define' option is a list of (name,value) tuples for (name, value) in self.define: self.compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro(macro) if self.libraries is not None: self.compiler.set_libraries(self.libraries) if self.library_dirs is not None: self.compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: self.compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: self.compiler.set_link_objects(self.link_objects) # Now actually compile and link everything. 
self.build_extensions() def check_extensions_list(self, extensions): """Ensure that the list of extensions (presumably provided as a command option 'extensions') is valid, i.e. it is a list of Extension objects. We also support the old-style list of 2-tuples, where the tuples are (ext_name, build_info), which are converted to Extension instances here. Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. """ if not isinstance(extensions, list): raise DistutilsSetupError, \ "'ext_modules' option must be a list of Extension instances" for i, ext in enumerate(extensions): if isinstance(ext, Extension): continue # OK! (assume type-checking done # by Extension constructor) if not isinstance(ext, tuple) or len(ext) != 2: raise DistutilsSetupError, \ ("each element of 'ext_modules' option must be an " "Extension instance or 2-tuple") ext_name, build_info = ext log.warn(("old-style (ext_name, build_info) tuple found in " "ext_modules for extension '%s'" "-- please convert to Extension instance" % ext_name)) if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)): raise DistutilsSetupError, \ ("first element of each tuple in 'ext_modules' " "must be the extension name (a string)") if not isinstance(build_info, dict): raise DistutilsSetupError, \ ("second element of each tuple in 'ext_modules' " "must be a dictionary (build info)") # OK, the (ext_name, build_info) dict is type-safe: convert it # to an Extension instance. ext = Extension(ext_name, build_info['sources']) # Easy stuff: one-to-one mapping from dict elements to # instance attributes. for key in ('include_dirs', 'library_dirs', 'libraries', 'extra_objects', 'extra_compile_args', 'extra_link_args'): val = build_info.get(key) if val is not None: setattr(ext, key, val) # Medium-easy stuff: same syntax/semantics, different names. ext.runtime_library_dirs = build_info.get('rpath') if 'def_file' in build_info: log.warn("'def_file' element of build info dict " "no longer supported") # Non-trivial stuff: 'macros' split into 'define_macros' # and 'undef_macros'. macros = build_info.get('macros') if macros: ext.define_macros = [] ext.undef_macros = [] for macro in macros: if not (isinstance(macro, tuple) and len(macro) in (1, 2)): raise DistutilsSetupError, \ ("'macros' element of build info dict " "must be 1- or 2-tuple") if len(macro) == 1: ext.undef_macros.append(macro[0]) elif len(macro) == 2: ext.define_macros.append(macro) extensions[i] = ext def get_source_files(self): self.check_extensions_list(self.extensions) filenames = [] # Wouldn't it be neat if we knew the names of header files too... for ext in self.extensions: filenames.extend(ext.sources) return filenames def get_outputs(self): # Sanity check the 'extensions' list -- can't assume this is being # done in the same run as a 'build_extensions()' call (in fact, we # can probably assume that it *isn't*!). self.check_extensions_list(self.extensions) # And build the list of output (built) filenames. Note that this # ignores the 'inplace' flag, and assumes everything goes in the # "build" tree. 
outputs = [] for ext in self.extensions: outputs.append(self.get_ext_fullpath(ext.name)) return outputs def build_extensions(self): # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): sources = ext.sources if sources is None or type(sources) not in (ListType, TupleType): raise DistutilsSetupError, \ ("in 'ext_modules' option (extension '%s'), " + "'sources' must be present and must be " + "a list of source filenames") % ext.name sources = list(sources) ext_path = self.get_ext_fullpath(ext.name) depends = sources + ext.depends if not (self.force or newer_group(depends, ext_path, 'newer')): log.debug("skipping '%s' extension (up-to-date)", ext.name) return else: log.info("building '%s' extension", ext.name) # First, scan the sources for SWIG definition files (.i), run # SWIG on 'em to create .c files, and modify the sources list # accordingly. sources = self.swig_sources(sources, ext) # Next, compile the source code to object files. # XXX not honouring 'define_macros' or 'undef_macros' -- the # CCompiler API needs to change to accommodate this, and I # want to do one thing at a time! # Two possible sources for extra compiler arguments: # - 'extra_compile_args' in Extension object # - CFLAGS environment variable (not particularly # elegant, but people seem to expect it and I # guess it's useful) # The environment variable should take precedence, and # any sensible compiler will give precedence to later # command line args. Hence we combine them in order: extra_args = ext.extra_compile_args or [] macros = ext.define_macros[:] for undef in ext.undef_macros: macros.append((undef,)) objects = self.compiler.compile(sources, output_dir=self.build_temp, macros=macros, include_dirs=ext.include_dirs, debug=self.debug, extra_postargs=extra_args, depends=ext.depends) # XXX -- this is a Vile HACK! # # The setup.py script for Python on Unix needs to be able to # get this list so it can perform all the clean up needed to # avoid keeping object files around when cleaning out a failed # build of an extension module. Since Distutils does not # track dependencies, we have to get rid of intermediates to # ensure all the intermediates will be properly re-built. # self._built_objects = objects[:] # Now link the object files together into a "shared object" -- # of course, first we have to figure out all the other things # that go into the mix. if ext.extra_objects: objects.extend(ext.extra_objects) extra_args = ext.extra_link_args or [] # Detect target language, if not provided language = ext.language or self.compiler.detect_language(sources) self.compiler.link_shared_object( objects, ext_path, libraries=self.get_libraries(ext), library_dirs=ext.library_dirs, runtime_library_dirs=ext.runtime_library_dirs, extra_postargs=extra_args, export_symbols=self.get_export_symbols(ext), debug=self.debug, build_temp=self.build_temp, target_lang=language) def swig_sources (self, sources, extension): """Walk the list of source files in 'sources', looking for SWIG interface (.i) files. Run SWIG on all that are found, and return a modified 'sources' list with SWIG source files replaced by the generated C (or C++) files. """ new_sources = [] swig_sources = [] swig_targets = {} # XXX this drops generated C/C++ files into the source tree, which # is fine for developers who want to distribute the generated # source -- but there should be an option to put SWIG output in # the temp dir. 
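# For reference, SWIG flags normally reach this method through the
# --swig-opts command line option parsed in finalize_options(); a
# hypothetical invocation would be:
#   python setup.py build_ext --swig-opts="-c++ -I../include"
# which makes '-c++' show up in the self.swig_opts list checked below.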
if self.swig_cpp: log.warn("--swig-cpp is deprecated - use --swig-opts=-c++") if self.swig_cpp or ('-c++' in self.swig_opts) or \ ('-c++' in extension.swig_opts): target_ext = '.cpp' else: target_ext = '.c' for source in sources: (base, ext) = os.path.splitext(source) if ext == ".i": # SWIG interface file new_sources.append(base + '_wrap' + target_ext) swig_sources.append(source) swig_targets[source] = new_sources[-1] else: new_sources.append(source) if not swig_sources: return new_sources swig = self.swig or self.find_swig() swig_cmd = [swig, "-python"] swig_cmd.extend(self.swig_opts) if self.swig_cpp: swig_cmd.append("-c++") # Do not override commandline arguments if not self.swig_opts: for o in extension.swig_opts: swig_cmd.append(o) for source in swig_sources: target = swig_targets[source] log.info("swigging %s to %s", source, target) self.spawn(swig_cmd + ["-o", target, source]) return new_sources # swig_sources () def find_swig (self): """Return the name of the SWIG executable. On Unix, this is just "swig" -- it should be in the PATH. Tries a bit harder on Windows. """ if os.name == "posix": return "swig" elif os.name == "nt": # Look for SWIG in its standard installation directory on # Windows (or so I presume!). If we find it there, great; # if not, act like Unix and assume it's in the PATH. for vers in ("1.3", "1.2", "1.1"): fn = os.path.join("c:\\swig%s" % vers, "swig.exe") if os.path.isfile(fn): return fn else: return "swig.exe" elif os.name == "os2": # assume swig available in the PATH. return "swig.exe" else: raise DistutilsPlatformError, \ ("I don't know how to find (much less run) SWIG " "on platform '%s'") % os.name # find_swig () # -- Name generators ----------------------------------------------- # (extension names, filenames, whatever) def get_ext_fullpath(self, ext_name): """Returns the path of the filename for a given extension. The file is located in `build_lib` or directly in the package (inplace option). """ # makes sure the extension name is only using dots all_dots = string.maketrans('/'+os.sep, '..') ext_name = ext_name.translate(all_dots) fullname = self.get_ext_fullname(ext_name) modpath = fullname.split('.') filename = self.get_ext_filename(ext_name) filename = os.path.split(filename)[-1] if not self.inplace: # no further work needed # returning : # build_dir/package/path/filename filename = os.path.join(*modpath[:-1]+[filename]) return os.path.join(self.build_lib, filename) # the inplace option requires to find the package directory # using the build_py command for that package = '.'.join(modpath[0:-1]) build_py = self.get_finalized_command('build_py') package_dir = os.path.abspath(build_py.get_package_dir(package)) # returning # package_dir/filename return os.path.join(package_dir, filename) def get_ext_fullname(self, ext_name): """Returns the fullname of a given extension name. Adds the `package.` prefix""" if self.package is None: return ext_name else: return self.package + '.' + ext_name def get_ext_filename(self, ext_name): r"""Convert the name of an extension (eg. "foo.bar") into the name of the file from which it will be loaded (eg. "foo/bar.so", or "foo\bar.pyd"). 
""" from distutils.sysconfig import get_config_var ext_path = string.split(ext_name, '.') # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] # extensions in debug_mode are named 'module_d.pyd' under windows so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: return os.path.join(*ext_path) + '_d' + so_ext return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): """Return the list of symbols that a shared extension has to export. This either uses 'ext.export_symbols' or, if it's not provided, "init" + module_name. Only relevant on Windows, where the .pyd file (DLL) must export the module "init" function. """ initfunc_name = "init" + ext.name.split('.')[-1] if initfunc_name not in ext.export_symbols: ext.export_symbols.append(initfunc_name) return ext.export_symbols def get_libraries (self, ext): """Return the list of libraries to link against when building a shared extension. On most platforms, this is just 'ext.libraries'; on Windows and OS/2, we add the Python library (eg. python20.dll). """ # The python library is always needed on Windows. For MSVC, this # is redundant, since the library is mentioned in a pragma in # pyconfig.h that MSVC groks. The other Windows compilers all seem # to need it mentioned explicitly, though, so that's what we do. # Append '_d' to the python import library on debug builds. if sys.platform == "win32": from distutils.msvccompiler import MSVCCompiler if not isinstance(self.compiler, MSVCCompiler): template = "python%d%d" if self.debug: template = template + '_d' pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib] else: return ext.libraries elif sys.platform == "os2emx": # EMX/GCC requires the python library explicitly, and I # believe VACPP does as well (though not confirmed) - AIM Apr01 template = "python%d%d" # debug versions of the main DLL aren't supported, at least # not at this time - AIM Apr01 #if self.debug: # template = template + '_d' pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib] elif sys.platform[:6] == "cygwin": template = "python%d.%d" pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib] elif sys.platform[:6] == "atheos": from distutils import sysconfig template = "python%d.%d" pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) # Get SHLIBS from Makefile extra = [] for lib in sysconfig.get_config_var('SHLIBS').split(): if lib.startswith('-l'): extra.append(lib[2:]) else: extra.append(lib) # don't extend ext.libraries, it may be shared with other # extensions, it is a reference to the original list return ext.libraries + [pythonlib, "m"] + extra elif sys.platform == 'darwin': # Don't use the default code below return ext.libraries elif sys.platform[:3] == 'aix': # Don't use the default code below return ext.libraries else: from distutils import sysconfig if sysconfig.get_config_var('Py_ENABLE_SHARED'): template = "python%d.%d" pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 
from umlfri2.application.events.solution import CloseSolutionEvent
from .events.model import DiagramDeletedEvent
from .events.tabs import OpenTabEvent, ChangedCurrentTabEvent, ClosedTabEvent
from .tab import Tab


class TabList:
    def __init__(self, application):
        self.__tabs = []
        self.__application = application
        self.__current_tab = None
        self.__locked_tabs = set()

        application.event_dispatcher.subscribe(DiagramDeletedEvent, self.__diagram_deleted)
        application.event_dispatcher.subscribe(CloseSolutionEvent, self.__solution_closed)

    def __diagram_deleted(self, event):
        tab = self.get_tab_for(event.diagram)
        if tab is not None:
            tab.close()

    def __solution_closed(self, event):
        events = []
        for tab in self.__tabs:
            events.append(ClosedTabEvent(tab))

        self.__tabs = []
        self.__application.event_dispatcher.dispatch_all(events)

        self.__current_tab = None
        self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(None))

    def reset_lock_status(self):
        self.__locked_tabs = {tab.drawing_area.diagram.save_id for tab in self.__tabs if tab.locked}

    @property
    def lock_status_changed(self):
        new_locked_tabs = {tab.drawing_area.diagram.save_id for tab in self.__tabs if tab.locked}
        return self.__locked_tabs != new_locked_tabs

    def get_tab_for(self, diagram):
        for tab in self.__tabs:
            if tab.drawing_area.diagram is diagram:
                return tab
        return None

    def open_new_project_tabs(self, tabs):
        last_tab = None
        for tab_info in tabs:
            tab = Tab(self.__application, self, tab_info.diagram, locked=tab_info.locked)
            self.__tabs.append(tab)
            self.__application.event_dispatcher.dispatch(OpenTabEvent(tab))
            last_tab = tab

        if last_tab is not None:
            self.__current_tab = last_tab
            self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(last_tab))

    def select_tab(self, diagram):
        if self.__current_tab is not None:
            self.__current_tab.drawing_area.reset_action()

        if diagram is None:
            self.__current_tab = None
            self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(None))
            return

        for tab in self.__tabs:
            if tab.drawing_area.diagram is diagram:
                if self.__current_tab is not tab:
                    self.__current_tab = tab
                    self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(tab))
                return tab
        else:
            tab = Tab(self.__application, self, diagram)
            self.__tabs.append(tab)
            self.__current_tab = tab
            self.__application.event_dispatcher.dispatch(OpenTabEvent(tab))
            self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(tab))
            return tab

    def _close_tab(self, tab):
        if tab.locked:
            tab.unlock()
        tab_id = self.__tabs.index(tab)
        del self.__tabs[tab_id]

        if tab_id < len(self.__tabs):
            self.__current_tab = self.__tabs[tab_id]
        elif self.__tabs:
            self.__current_tab = self.__tabs[-1]
        else:
            self.__current_tab = None

        self.__application.event_dispatcher.dispatch(ClosedTabEvent(tab))
        self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(self.__current_tab))

    def close_all(self):
        events = []
        new_tabs = []
        for tab in self.__tabs:
            if tab.locked:
                new_tabs.append(tab)
            else:
                events.append(ClosedTabEvent(tab))

        self.__tabs = new_tabs
        self.__application.event_dispatcher.dispatch_all(events)

        if new_tabs:
            self.__current_tab = new_tabs[-1]
            self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(new_tabs[-1]))
        else:
            self.__current_tab = None
            self.__application.event_dispatcher.dispatch(ChangedCurrentTabEvent(None))

    @property
    def current_tab(self):
        return self.__current_tab

    def __iter__(self):
        yield from self.__tabs
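
# --- Illustrative sketch (not part of umlfri2) ------------------------------
# The index bookkeeping used by TabList._close_tab above, extracted into a
# standalone helper so the "which tab becomes current next" rule can be
# checked in isolation.  The function name and asserts are hypothetical.
def _next_current_index(tab_count_before_close, closed_index):
    """Return the index of the tab that becomes current after closing the
    tab at 'closed_index', or None when no tabs remain."""
    remaining = tab_count_before_close - 1
    if remaining == 0:
        return None                  # no tabs left to select
    if closed_index < remaining:
        return closed_index          # the right-hand neighbour slid into this slot
    return remaining - 1             # the right-most tab was closed: take the new last one

assert _next_current_index(3, 1) == 1     # middle tab closed -> right neighbour
assert _next_current_index(2, 1) == 0     # last tab closed -> previous tab
assert _next_current_index(1, 0) is None  # nothing left
# ----------------------------------------------------------------------------
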
# coding: utf-8
from __future__ import unicode_literals

import hashlib
import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    sanitized_Request,
    urlencode_postdata,
)


class FC2IE(InfoExtractor):
    _VALID_URL = r'^(?:https?://video\.fc2\.com/(?:[^/]+/)*content/|fc2:)(?P<id>[^/]+)'
    IE_NAME = 'fc2'
    _NETRC_MACHINE = 'fc2'
    _TESTS = [{
        'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
        'md5': 'a6ebe8ebe0396518689d963774a54eb7',
        'info_dict': {
            'id': '20121103kUan1KHs',
            'ext': 'flv',
            'title': 'Boxing again with Puff',
        },
    }, {
        'url': 'http://video.fc2.com/en/content/20150125cEva0hDn/',
        'info_dict': {
            'id': '20150125cEva0hDn',
            'ext': 'mp4',
        },
        'params': {
            'username': 'ytdl@yt-dl.org',
            'password': '(snip)',
        },
        'skip': 'requires actual password',
    }, {
        'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
        'only_matching': True,
    }]

    def _login(self):
        username, password = self._get_login_info()
        if username is None or password is None:
            return False

        # Log in
        login_form_strs = {
            'email': username,
            'password': password,
            'done': 'video',
            'Submit': ' Login ',
        }

        login_data = urlencode_postdata(login_form_strs)
        request = sanitized_Request(
            'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)

        login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
        if 'mode=redirect&login=done' not in login_results:
            self.report_warning('unable to log in: bad username or password')
            return False

        # this is also needed