Update python stuff to latest

Probably definitely break linux
This commit is contained in:
Lirusaito
2019-04-20 16:25:51 -04:00
parent a5be867238
commit f7386e8e70
30 changed files with 545 additions and 7082 deletions

View File

@@ -1,27 +0,0 @@
"""\
@file __init__.py
@brief Initialization file for the indra.base module.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""

View File

@@ -1,73 +0,0 @@
#!/usr/bin/python
##
## $LicenseInfo:firstyear=2011&license=viewerlgpl$
## Second Life Viewer Source Code
## Copyright (C) 2011, Linden Research, Inc.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation;
## version 2.1 of the License only.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
## $/LicenseInfo$
# NOTE(review): Python 2 exercise/benchmark script comparing the C llsd
# serializer (cllsd) against the pure-Python LLSDXMLFormatter.
from indra.base import llsd, lluuid
from datetime import datetime
import cllsd
import time, sys

class myint(int):
    # int subclass: verifies cllsd accepts subclasses of builtin types.
    pass

# Representative sample of serializable values, including edge cases:
# markup characters, non-ASCII text, and sys.maxint overflow into long.
values = (
    '&<>',
    u'\u81acj',
    llsd.uri('http://foo<'),
    lluuid.UUID(),
    llsd.LLSD(['thing']),
    1,
    myint(31337),
    sys.maxint + 10,
    llsd.binary('foo'),
    [],
    {},
    {u'f&\u1212': 3},
    3.1,
    True,
    None,
    datetime.fromtimestamp(time.time()),
)

def valuator(values):
    # Generator wrapper; would exercise non-sequence iterable input.
    for v in values:
        yield v

longvalues = () # (values, list(values), iter(values), valuator(values))

for v in values + longvalues:
    print '%r => %r' % (v, cllsd.llsd_to_xml(v))

# Timing comparison: serialize one large nested structure with both
# implementations and report the relative speedup of the C path.
a = [[{'a':3}]] * 1000000

s = time.time()
print hash(cllsd.llsd_to_xml(a))
e = time.time()
t1 = e - s
print t1

s = time.time()
print hash(llsd.LLSDXMLFormatter()._format(a))
e = time.time()
t2 = e - s
print t2

print 'Speedup:', t2 / t1

View File

@@ -1,266 +0,0 @@
"""\
@file config.py
@brief Utility module for parsing and accessing the indra.xml config file.
$LicenseInfo:firstyear=2006&license=mit$
Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import copy
import errno
import os
import traceback
import time
import types
from os.path import dirname, getmtime, join, realpath
from indra.base import llsd
_g_config = None
class IndraConfig(object):
    """
    IndraConfig loads an 'indra' xml configuration file
    and loads into memory. This representation in memory
    can get updated to overwrite values or add new values.

    The xml configuration file is considered a live file and changes
    to the file are checked and reloaded periodically. If a value had
    been overwritten via the update or set method, the loaded values
    from the file are ignored (the values from the update/set methods
    override)
    """
    def __init__(self, indra_config_file):
        """@param indra_config_file path to the llsd-xml config file,
        or None for an empty config not backed by any file."""
        self._indra_config_file = indra_config_file
        self._reload_check_interval = 30 # seconds
        self._last_check_time = 0
        self._last_mod_time = 0

        # values set via set()/update(); these always win over file values
        self._config_overrides = {}
        # values parsed from the config file
        self._config_file_dict = {}
        # merged view actually served by __getitem__/get
        self._combined_dict = {}

        self._load()

    def _load(self):
        # if you initialize the IndraConfig with None, no attempt
        # is made to load any files
        if self._indra_config_file is None:
            return

        # 'with' guarantees the handle is closed even when llsd.parse
        # raises; the previous code leaked the file object on a parse error.
        with open(self._indra_config_file) as config_file:
            self._config_file_dict = llsd.parse(config_file.read())
        self._combine_dictionaries()

        self._last_mod_time = self._get_last_modified_time()
        self._last_check_time = time.time() # now

    def _get_last_modified_time(self):
        """
        Returns the mtime (last modified time) of the config file,
        if such exists.
        """
        if self._indra_config_file is not None:
            return os.path.getmtime(self._indra_config_file)
        return 0

    def _combine_dictionaries(self):
        # rebuild the merged view; overrides are applied last so they win
        self._combined_dict = {}
        self._combined_dict.update(self._config_file_dict)
        self._combined_dict.update(self._config_overrides)

    def _reload_if_necessary(self):
        now = time.time()
        if (now - self._last_check_time) > self._reload_check_interval:
            self._last_check_time = now
            try:
                modtime = self._get_last_modified_time()
                if modtime > self._last_mod_time:
                    self._load()
            # 'except X as e' replaces the removed-in-py3 'except X, e'
            # form; valid on Python 2.6+ as well.
            except OSError as e:
                if e.errno == errno.ENOENT: # file not found
                    # someone messed with our internal state
                    # or removed the file
                    print('WARNING: Configuration file has been removed ' + (self._indra_config_file))
                    print('Disabling reloading of configuration file.')
                    traceback.print_exc()
                    self._indra_config_file = None
                    self._last_check_time = 0
                    self._last_mod_time = 0
                else:
                    raise # pass the exception along to the caller

    def __getitem__(self, key):
        self._reload_if_necessary()
        return self._combined_dict[key]

    def get(self, key, default = None):
        """Return the value for key, or default when the key is absent."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        """
        Sets the value of the config setting of key to be newval

        Once any key/value pair is changed via the set method,
        that key/value pair will remain set with that value until
        change via the update or set method
        """
        self._config_overrides[key] = value
        self._combine_dictionaries()

    def set(self, key, newval):
        return self.__setitem__(key, newval)

    def update(self, new_conf):
        """
        Load an XML file and apply its map as overrides or additions
        to the existing config. Update can be a file or a dict.

        Once any key/value pair is changed via the update method,
        that key/value pair will remain set with that value until
        change via the update or set method
        """
        if isinstance(new_conf, dict):
            overrides = new_conf
        else:
            # assuming that it is a filename
            # 'with' closes the handle even if llsd.parse raises
            with open(new_conf) as config_file:
                overrides = llsd.parse(config_file.read())

        self._config_overrides.update(overrides)
        self._combine_dictionaries()

    def as_dict(self):
        """
        Returns immutable copy of the IndraConfig as a dictionary
        """
        return copy.deepcopy(self._combined_dict)
def load(config_xml_file = None):
    """Load the global config from config_xml_file, or from the default
    indra.xml (then globals.xml) when no file is given.  On total
    failure the global becomes an empty, file-less IndraConfig."""
    global _g_config

    load_default_files = config_xml_file is None
    if load_default_files:
        ## going from:
        ## "/opt/linden/indra/lib/python/indra/base/config.py"
        ## to:
        ## "/opt/linden/etc/indra.xml"
        # NOTE(review): the "../" chain is concatenated with no path
        # separator, so the first component is "<dir>.." -- realpath
        # still collapses it, but the result may not match the layout
        # the comment above claims; verify against the deployed tree.
        config_xml_file = realpath(
            dirname(realpath(__file__)) + "../../../../../../etc/indra.xml")

    try:
        _g_config = IndraConfig(config_xml_file)
    except IOError:
        # Failure to load passed in file
        # or indra.xml default file
        if load_default_files:
            try:
                config_xml_file = realpath(
                    dirname(realpath(__file__)) + "../../../../../../etc/globals.xml")
                _g_config = IndraConfig(config_xml_file)
                return
            except IOError:
                # Failure to load globals.xml
                # fall to code below
                pass

        # Either failed to load passed in file
        # or failed to load all default files
        _g_config = IndraConfig(None)
def dump(indra_xml_file, indra_cfg = None, update_in_mem=False):
    '''
    Dump config contents into a file; kind of the reverse of load().

    @param indra_xml_file path of the file to write.
    @param indra_cfg optional dict to dump instead of the global config.
    @param update_in_mem when True, also apply indra_cfg to the global
        config via update(); the global is NOT touched otherwise.
    '''
    global _g_config
    if not indra_cfg:
        if _g_config is None:
            return
        indra_cfg = _g_config.as_dict()

    if not indra_cfg:
        return

    # Format first, then write inside 'with': the file is not created if
    # formatting fails, and the handle is closed even if write() raises
    # (the original leaked the handle on error).
    _config_xml = llsd.format_xml(indra_cfg)
    with open(indra_xml_file, 'w') as config_file:
        config_file.write(_config_xml)

    if update_in_mem:
        update(indra_cfg)
def update(new_conf):
    """Apply new_conf (a dict, or the name of an llsd-xml file) as
    overrides on the global config.

    Calling update() before load() creates a fresh config that is not
    backed by any file, matching the function's historical behavior.
    """
    global _g_config
    if _g_config is None:
        _g_config = IndraConfig(None)
    return _g_config.update(new_conf)
def get(key, default = None):
    """Look up key in the global config, loading it on first use.

    @param key config key to read.
    @param default value returned when the key is absent.
    """
    global _g_config
    if _g_config is None:
        load()
    return _g_config.get(key, default)
def set(key, newval):
    """
    Set key to newval in the global config, creating an empty config
    first when none exists yet.

    Once changed via set, the pair keeps that value until changed again
    via update or set, or until program termination.
    """
    global _g_config
    if _g_config is None:
        _g_config = IndraConfig(None)
    _g_config.set(key, newval)
def get_config():
    """Return the module-global IndraConfig instance (None when neither
    load() nor set()/update() has run yet)."""
    global _g_config
    return _g_config

File diff suppressed because it is too large Load Diff

View File

@@ -1,319 +0,0 @@
"""\
@file lluuid.py
@brief UUID parser/generator.
$LicenseInfo:firstyear=2004&license=mit$
Copyright (c) 2004-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import random, socket, string, time, re
import uuid
try:
# Python 2.6
from hashlib import md5
except ImportError:
# Python 2.5 and earlier
from md5 import new as md5
def _int2binstr(i, l):
    """Return the low-order l bytes of integer i as a big-endian
    byte-character string."""
    out = ''
    for _ in range(l):
        # prepend so the most significant byte ends up first
        out = chr(i & 0xFF) + out
        i >>= 8
    return out
def _binstr2int(s):
    """Return the big-endian integer encoded by byte-character string s.

    Inverse of _int2binstr for strings of matching length.
    """
    # 0 instead of long(0): 'long' no longer exists on modern Python,
    # and Python 2 auto-promotes int to long on overflow anyway, so the
    # result is unchanged.
    i = 0
    for c in s:
        i = (i << 8) + ord(c)
    return i
class UUID(object):
    """
    A class which represents a 16 byte integer. Stored as a 16 byte 8
    bit character string.

    The string version is to be of the form:
    AAAAAAAA-AAAA-BBBB-BBBB-BBBBBBCCCCCC (a 128-bit number in hex)
    where A=network address, B=timestamp, C=random.
    """
    NULL_STR = "00000000-0000-0000-0000-000000000000"

    # the UUIDREGEX_STRING is helpful for parsing UUID's in text
    hex_wildcard = r"[0-9a-fA-F]"
    word = hex_wildcard + r"{4,4}-"
    long_word = hex_wildcard + r"{8,8}-"
    very_long_word = hex_wildcard + r"{12,12}"
    UUID_REGEX_STRING = long_word + word + word + word + very_long_word
    uuid_regex = re.compile(UUID_REGEX_STRING)

    # Class-level state shared by all instances, computed once at class
    # creation: a private RNG and this host's IP in hex form.
    rand = random.Random()
    ip = ''
    try:
        ip = socket.gethostbyname(socket.gethostname())
    except(socket.gaierror, socket.error):
        # no ip address, so just default to somewhere in 10.x.x.x
        ip = '10'
        for i in range(3):
            ip += '.' + str(rand.randrange(1,254))
    hexip = ''.join(["%04x" % long(i) for i in ip.split('.')])
    lastid = ''

    def __init__(self, possible_uuid=None):
        """
        Initialize to first valid UUID in argument (if a string),
        or to null UUID if none found or argument is not supplied.
        If the argument is a UUID, the constructed object will be a copy of it.
        """
        self._bits = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        if possible_uuid is None:
            return

        if isinstance(possible_uuid, type(self)):
            self.set(possible_uuid)
            return

        uuid_match = UUID.uuid_regex.search(possible_uuid)
        if uuid_match:
            uuid_string = uuid_match.group()
            s = string.replace(uuid_string, '-', '')
            # pack the 32 hex digits into 16 raw bytes, 4 at a time
            self._bits = _int2binstr(string.atol(s[:8],16),4) + \
                         _int2binstr(string.atol(s[8:16],16),4) + \
                         _int2binstr(string.atol(s[16:24],16),4) + \
                         _int2binstr(string.atol(s[24:],16),4)

    def __len__(self):
        """
        Used by the len() builtin.
        """
        # length of the canonical string form, not of the 16-byte payload
        return 36

    def __nonzero__(self):
        # Python 2 truth test: the null UUID is falsy.
        return self._bits != "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"

    def __str__(self):
        uuid_string = self.toString()
        return uuid_string

    __repr__ = __str__

    def __getitem__(self, index):
        # index into the canonical string representation
        return str(self)[index]

    def __eq__(self, other):
        # strings compare by string form; UUIDs (and anything with
        # _bits) compare by raw bits
        if isinstance(other, (str, unicode)):
            return other == str(self)
        return self._bits == getattr(other, '_bits', '')

    def __ne__(self, other):
        return not self.__eq__(other)

    # Ordering compares the raw big-endian byte strings directly.
    def __le__(self, other):
        return self._bits <= other._bits

    def __ge__(self, other):
        return self._bits >= other._bits

    def __lt__(self, other):
        return self._bits < other._bits

    def __gt__(self, other):
        return self._bits > other._bits

    def __hash__(self):
        return hash(self._bits)

    def set(self, uuid):
        # copy another UUID's bits into self
        self._bits = uuid._bits

    def setFromString(self, uuid_string):
        """
        Given a string version of a uuid, set self bits
        appropriately. Returns self.
        """
        s = string.replace(uuid_string, '-', '')
        self._bits = _int2binstr(string.atol(s[:8],16),4) + \
                     _int2binstr(string.atol(s[8:16],16),4) + \
                     _int2binstr(string.atol(s[16:24],16),4) + \
                     _int2binstr(string.atol(s[24:],16),4)
        return self

    def setFromMemoryDump(self, gdb_string):
        """
        We expect to get gdb_string as four hex units. eg:
        0x147d54db 0xc34b3f1b 0x714f989b 0x0a892fd2
        Which will be translated to:
        db547d14-1b3f4bc3-9b984f71-d22f890a
        Returns self.
        """
        s = string.replace(gdb_string, '0x', '')
        s = string.replace(s, ' ', '')
        t = ''
        # reverse byte order within each 4-byte (8 hex digit) word
        for i in range(8,40,8):
            for j in range(0,8,2):
                t = t + s[i-j-2:i-j]
        self.setFromString(t)

    def toString(self):
        """
        Return as a string matching the LL standard
        AAAAAAAA-AAAA-BBBB-BBBB-BBBBBBCCCCCC (a 128-bit number in hex)
        where A=network address, B=timestamp, C=random.
        """
        return uuid_bits_to_string(self._bits)

    def getAsString(self):
        """
        Return a different string representation of the form
        AAAAAAAA-AAAABBBB-BBBBBBBB-BBCCCCCC (a 128-bit number in hex)
        where A=network address, B=timestamp, C=random.
        """
        i1 = _binstr2int(self._bits[0:4])
        i2 = _binstr2int(self._bits[4:8])
        i3 = _binstr2int(self._bits[8:12])
        i4 = _binstr2int(self._bits[12:16])
        return '%08lx-%08lx-%08lx-%08lx' % (i1,i2,i3,i4)

    def generate(self):
        """
        Generate a new uuid. This algorithm is slightly different
        from c++ implementation for portability reasons.
        Returns self.
        """
        # md5 of a time-based uuid1 supplies the 16 raw bytes
        m = md5()
        m.update(uuid.uuid1().bytes)
        self._bits = m.digest()
        return self

    def isNull(self):
        """
        Returns 1 if the uuid is null - ie, equal to default uuid.
        """
        return (self._bits == "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0")

    def xor(self, rhs):
        """
        xors self with rhs.
        """
        # word-by-word through the int helpers, since the bits are kept
        # as a character string rather than an integer
        v1 = _binstr2int(self._bits[0:4]) ^ _binstr2int(rhs._bits[0:4])
        v2 = _binstr2int(self._bits[4:8]) ^ _binstr2int(rhs._bits[4:8])
        v3 = _binstr2int(self._bits[8:12]) ^ _binstr2int(rhs._bits[8:12])
        v4 = _binstr2int(self._bits[12:16]) ^ _binstr2int(rhs._bits[12:16])
        self._bits = _int2binstr(v1,4) + \
                     _int2binstr(v2,4) + \
                     _int2binstr(v3,4) + \
                     _int2binstr(v4,4)
# module-level null constant (an all-zero UUID instance)
NULL = UUID()
def printTranslatedMemory(four_hex_uints):
    """
    We expect to get the string as four hex units. eg:
    0x147d54db 0xc34b3f1b 0x714f989b 0x0a892fd2
    Which will be translated to:
    db547d14-1b3f4bc3-9b984f71-d22f890a
    """
    # NOTE(review): local 'uuid' shadows the imported uuid module here
    # (harmless in this function, but confusing).
    uuid = UUID()
    uuid.setFromMemoryDump(four_hex_uints)
    print uuid.toString()
def isUUID(id_str):
    """
    Classify id_str as a UUID.

    Returns:
    - 1 when id_str is a UUID instance or matches the UUID regex
    - 0 when it clearly cannot be one (empty, too short, too long)
    - None when neither test settles the question
    """
    if not id_str:
        return 0
    length = len(id_str)
    if length < 5 or length > 36:
        return 0
    if isinstance(id_str, UUID):
        return 1
    if UUID.uuid_regex.match(id_str):
        return 1
    return None
def isPossiblyID(id_str):
    """
    This function returns 1 if the string passed has some uuid-like
    characteristics. Otherwise returns 0.
    """
    # Defer to the strict test first; it settles strings that are
    # definitely (1) or definitely not (0) UUIDs.
    is_uuid = isUUID(id_str)
    if is_uuid is not None:
        return is_uuid

    # build a string which matches every character.
    hex_wildcard = r"[0-9a-fA-F]"
    chars = len(id_str)

    # first group: up to 8 hex digits plus an optional dash
    next = min(chars, 8)
    matcher = hex_wildcard+"{"+str(next)+","+str(next)+"}"
    chars = chars - next
    if chars > 0:
        matcher = matcher + "-"
        chars = chars - 1

    # three middle groups of up to 4 hex digits, dash-separated
    for block in range(3):
        next = max(min(chars, 4), 0)
        if next:
            matcher = matcher + hex_wildcard+"{"+str(next)+","+str(next)+"}"
            chars = chars - next
        if chars > 0:
            matcher = matcher + "-"
            chars = chars - 1

    # final group: up to 12 hex digits
    if chars > 0:
        next = min(chars, 12)
        matcher = matcher + hex_wildcard+"{"+str(next)+","+str(next)+"}"

    #print matcher
    uuid_matcher = re.compile(matcher)
    if uuid_matcher.match(id_str):
        return 1
    return 0
def uuid_bits_to_string(bits):
    """Format a 16-byte bit string as the canonical 8-4-4-4-12 hex
    UUID string."""
    fields = (
        _binstr2int(bits[0:4]),
        _binstr2int(bits[4:6]),
        _binstr2int(bits[6:8]),
        _binstr2int(bits[8:10]),
        _binstr2int(bits[10:12]),
        _binstr2int(bits[12:16]),
    )
    return '%08lx-%04lx-%04lx-%04lx-%04lx%08lx' % fields
def uuid_bits_to_uuid(bits):
    # Convenience: build a UUID instance from a 16-byte bit string.
    return UUID(uuid_bits_to_string(bits))
# Optionally register a mulib producer so UUIDs can be written directly
# in HTTP responses; skipped entirely when mulib is unavailable.
try:
    from mulib import stacked
    stacked.NoProducer()  # just to exercise stacked
except:
    # NOTE(review): bare except deliberately swallows ANY failure (not
    # just ImportError) so environments without a working mulib can
    # still import this module -- but it also hides real errors raised
    # inside stacked itself.
    #print "Couldn't import mulib.stacked, not registering UUID converter"
    pass
else:
    def convertUUID(uuid, req):
        # serialize a UUID for mulib by writing its string form
        req.write(str(uuid))

    stacked.add_producer(UUID, convertUUID, "*/*")
    stacked.add_producer(UUID, convertUUID, "text/html")

View File

@@ -1,121 +0,0 @@
"""\
@file metrics.py
@author Phoenix
@date 2007-11-27
@brief simple interface for logging metrics
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import sys

try:
    import syslog
except ImportError:
    # Windows: no syslog module, so substitute a minimal shim exposing
    # the one function this module calls (syslog.syslog).
    class syslog(object):
        # wrap to a lame syslog for windows
        _logfp = sys.stderr

        def syslog(msg):
            # Reference the attribute through the class: the original
            # bare '_logfp' raised NameError at call time, because a
            # class body's scope is not visible inside its methods.
            syslog._logfp.write(msg)
            if not msg.endswith('\n'):
                syslog._logfp.write('\n')
        syslog = staticmethod(syslog)
from indra.base.llsd import format_notation
def record_metrics(table, stats):
    "Write a standard metrics log"
    # delegates to _log with the LLMETRICS header
    _log("LLMETRICS", table, stats)
def record_event(table, data):
    "Write a standard logmessage log"
    # delegates to _log with the LLLOGMESSAGE header
    _log("LLLOGMESSAGE", table, data)
def set_destination(dest):
    """Set the destination of metrics logs for this process.

    If no destination is set before the first logging call, that call
    opens sys.stdout as the destination.

    @param dest a file-like object that will receive log lines.
    @raise RuntimeError when dest is None (unsetting is not allowed).
    """
    global _destination
    if dest is None:
        raise RuntimeError("Attempt to unset metrics destination.")
    _destination = dest
def destination():
    """Get the destination of the metrics logs for this process.
    Returns None if no destination is set"""
    # read-only accessor for the module-level destination
    global _destination
    return _destination
class SysLogger(object):
    "A file-like object which writes to syslog."
    def __init__(self, ident='indra', logopt = None, facility = None):
        # logopt/facility default to console+PID logging on LOCAL0
        try:
            if logopt is None:
                logopt = syslog.LOG_CONS | syslog.LOG_PID
            if facility is None:
                facility = syslog.LOG_LOCAL0
            syslog.openlog(ident, logopt, facility)
            # close the log at interpreter shutdown
            import atexit
            atexit.register(syslog.closelog)
        except AttributeError:
            # No syslog module on Windows: the shim class lacks
            # LOG_CONS/openlog, so configuration is silently skipped.
            pass

    def write(str):
        # staticmethod; 'str' is the message text (shadows the builtin)
        syslog.syslog(str)
    write = staticmethod(write)

    def flush():
        # no buffering; present only to satisfy the file-like interface
        pass
    flush = staticmethod(flush)
#
# internal API
#
_sequence_id = 0
_destination = None

def _next_id():
    """Return the current log sequence number and advance the counter.

    The first call returns 0, then 1, and so on for this process.
    """
    global _sequence_id
    # renamed from 'next': the original local shadowed the builtin
    current = _sequence_id
    _sequence_id += 1
    return current
def _dest():
    """Return the active log destination, opening sys.stdout as the
    default on first use (this default is documented in the metrics
    functions above)."""
    global _destination
    if _destination is None:
        _destination = sys.stdout
    return _destination
def _log(header, table, data):
    # Compose a single "<HEADER> (<seq>) <table> <notation>" line and
    # flush immediately so each record is visible as soon as written.
    line = "%s (%d) %s %s" % (header, _next_id(), table, format_notation(data))
    out = _dest()
    out.write(line)
    out.flush()

View File

@@ -1,30 +0,0 @@
#!/usr/bin/python
## $LicenseInfo:firstyear=2011&license=viewerlgpl$
## Second Life Viewer Source Code
## Copyright (C) 2011, Linden Research, Inc.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation;
## version 2.1 of the License only.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
## $/LicenseInfo$
import warnings

# Deprecation shim: everything here now lives in eventlet.httpc.
warnings.warn("indra.ipc.httputil has been deprecated; use eventlet.httpc instead", DeprecationWarning, 2)

from eventlet.httpc import *

# legacy alias kept for callers still using the old camelCase name
makeConnection = make_connection

View File

@@ -1,100 +0,0 @@
"""\
@file llsdhttp.py
@brief Functions to ease moving llsd over http
$LicenseInfo:firstyear=2006&license=mit$
Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import os.path
import os
import urlparse

from indra.base import llsd

from eventlet import httpc

# One shared HTTP suite configured for the llsd+xml wire format; the
# module-level verbs below are bound methods of this suite.  The
# underscore variants (get_, post_, ...) return (status, headers, body)
# tuples -- see the *Status wrappers further down.
suite = httpc.HttpSuite(llsd.format_xml, llsd.parse, 'application/llsd+xml')
delete = suite.delete
delete_ = suite.delete_
get = suite.get
get_ = suite.get_
head = suite.head
head_ = suite.head_
post = suite.post
post_ = suite.post_
put = suite.put
put_ = suite.put_
request = suite.request
request_ = suite.request_

# import every httpc error exception into our namespace for convenience
for x in httpc.status_to_error_map.itervalues():
    globals()[x.__name__] = x
ConnectionError = httpc.ConnectionError
Retriable = httpc.Retriable

# NOTE(review): this second loop re-exports ConnectionError a third
# time; redundant with the two assignments just above.
for x in (httpc.ConnectionError,):
    globals()[x.__name__] = x
def postFile(url, filename):
    """POST the llsd-parsed contents of filename to url.

    @return the (status, headers, body) tuple from post_.
    """
    # 'with' closes the handle even if read() raises; the original
    # leaked the file object on error.
    with open(filename) as f:
        body = f.read()
    llsd_body = llsd.parse(body)
    return post_(url, llsd_body)
# deprecated in favor of get_
def getStatus(url, use_proxy=False):
    """Return only the HTTP status from a GET of url."""
    return get_(url, use_proxy=use_proxy)[0]
# deprecated in favor of put_
def putStatus(url, data):
    """Return only the HTTP status from a PUT of data to url."""
    return put_(url, data)[0]
# deprecated in favor of delete_
def deleteStatus(url):
    """Return only the HTTP status from a DELETE of url."""
    return delete_(url)[0]
# deprecated in favor of post_
def postStatus(url, data):
    """Return only the HTTP status from a POST of data to url."""
    return post_(url, data)[0]
def postFileStatus(url, filename):
    """Return (status, body) from POSTing the llsd file at filename."""
    result = postFile(url, filename)
    return result[0], result[2]
def getFromSimulator(path, use_proxy=False):
    # NOTE(review): 'simulatorHostAndPort' is not defined anywhere in
    # this module -- calling this raises NameError unless some caller
    # injects that global first; verify before use.
    return get('http://' + simulatorHostAndPort + path, use_proxy=use_proxy)
def postToSimulator(path, data=None):
    # NOTE(review): same undefined 'simulatorHostAndPort' global as in
    # getFromSimulator above -- NameError at call time; verify.
    return post('http://' + simulatorHostAndPort + path, data)

View File

@@ -1,81 +0,0 @@
"""\
@file mysql_pool.py
@brief Thin wrapper around eventlet.db_pool that chooses MySQLdb and Tpool.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import MySQLdb
from eventlet import db_pool
class DatabaseConnector(db_pool.DatabaseConnector):
    """eventlet DatabaseConnector specialized for MySQLdb with
    tpool-wrapped connection pools."""
    def __init__(self, credentials, *args, **kwargs):
        super(DatabaseConnector, self).__init__(MySQLdb, credentials,
                                                conn_pool=db_pool.ConnectionPool,
                                                *args, **kwargs)

    # get is extended relative to eventlet.db_pool to accept a port argument
    def get(self, host, dbname, port=3306):
        """Return the pool for (host, dbname, port), creating it on
        first use with this host's credentials merged in."""
        key = (host, dbname, port)
        if key not in self._databases:
            pool_kwargs = self._kwargs.copy()
            pool_kwargs['db'] = dbname
            pool_kwargs['host'] = host
            pool_kwargs['port'] = port
            pool_kwargs.update(self.credentials_for(host))
            self._databases[key] = ConnectionPool(*self._args, **pool_kwargs)
        return self._databases[key]
class ConnectionPool(db_pool.TpooledConnectionPool):
    """A pool which gives out saranwrapped MySQLdb connections from a pool
    """
    def __init__(self, *args, **kwargs):
        super(ConnectionPool, self).__init__(MySQLdb, *args, **kwargs)

    def get(self):
        conn = super(ConnectionPool, self).get()
        # Annotate the connection with the parameters it was created
        # with; other code compares against this to catch an accidental
        # mid-stream database switch during a series of queries.
        # arg_names lists the constructor arguments of MySQLdb
        # Connection objects, in positional order.
        arg_names = ['host','user','passwd','db','port','unix_socket','conv','connect_timeout',
                     'compress', 'named_pipe', 'init_command', 'read_default_file', 'read_default_group',
                     'cursorclass', 'use_unicode', 'charset', 'sql_mode', 'client_flag', 'ssl',
                     'local_infile']
        # The pool may have been constructed with a mix of positional
        # and keyword arguments; fold the positional ones (self._args)
        # into a keyword dict and merge the real keywords over them so
        # the result is a single easy-to-inspect mapping.
        params = dict((arg_names[i], value) for i, value in enumerate(self._args))
        params.update(self._kwargs)
        conn.connection_parameters = params
        return conn

View File

@@ -1,165 +0,0 @@
"""\
@file russ.py
@brief Recursive URL Substitution Syntax helpers
@author Phoenix
Many details on how this should work is available on the wiki:
https://wiki.secondlife.com/wiki/Recursive_URL_Substitution_Syntax
Adding features to this should be reflected in that page in the
implementations section.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import urllib
from indra.ipc import llsdhttp
class UnbalancedBraces(Exception):
    """Raised when a format string's '{' and '}' braces do not pair up."""
    pass

class UnknownDirective(Exception):
    """Raised for a '{...}' directive whose prefix is not recognized."""
    pass

class BadDirective(Exception):
    """Raised when a recognized directive is malformed."""
    pass
def format_value_for_path(value):
    """Quote value for use as a URL path component.

    Lists and tuples become '/'-joined, individually quoted components;
    anything else is stringified and quoted whole.
    """
    # isinstance instead of 'type(value) in [list, tuple]': equivalent
    # for exact lists/tuples and additionally accepts their subclasses.
    if isinstance(value, (list, tuple)):
        # *NOTE: treat lists as unquoted path components so that the quoting
        # doesn't get out-of-hand. This is a workaround for the fact that
        # russ always quotes, even if the data it's given is already quoted,
        # and it's not safe to simply unquote a path directly, so if we want
        # russ to substitute urls parts inside other url parts we always
        # have to do so via lists of unquoted path components.
        return '/'.join([urllib.quote(str(item)) for item in value])
    else:
        return urllib.quote(str(value))
def format(format_str, context):
    """@brief Format format string according to rules for RUSS.
    @see https://osiris.lindenlab.com/mediawiki/index.php/Recursive_URL_Substitution_Syntax
    @param format_str The input string to format.
    @param context A map used for string substitutions.
    @return Returns the formatted string. If no match, the braces remain intact.
    """
    # Outer loop: re-scan the whole string after each round of
    # substitutions, since substituted text may itself contain
    # directives (the "recursive" part of RUSS).
    while True:
        #print "format_str:", format_str
        all_matches = _find_sub_matches(format_str)
        if not all_matches:
            break
        substitutions = 0
        # Middle loop: process one nesting depth at a time, starting
        # from the deepest (all_matches.pop()).
        while True:
            matches = all_matches.pop()
            # we work from right to left to make sure we do not
            # invalidate positions earlier in format_str
            matches.reverse()
            for pos in matches:
                # Use index since _find_sub_matches should have raised
                # an exception, and failure to find now is an exception.
                end = format_str.index('}', pos)
                #print "directive:", format_str[pos+1:pos+5]
                if format_str[pos + 1] == '$':
                    # {$key}: path-quoted context lookup (KeyError if absent)
                    value = context[format_str[pos + 2:end]]
                    if value is not None:
                        value = format_value_for_path(value)
                elif format_str[pos + 1] == '%':
                    # {%key}: context value rendered as a '?' query string
                    value = _build_query_string(
                        context.get(format_str[pos + 2:end]))
                elif format_str[pos+1:pos+5] == 'http' or format_str[pos+1:pos+5] == 'file':
                    # {http...|...} / {file...|...}: fetch-and-substitute
                    value = _fetch_url_directive(format_str[pos + 1:end])
                else:
                    raise UnknownDirective, format_str[pos:end + 1]
                if value is not None:
                    format_str = format_str[:pos]+str(value)+format_str[end+1:]
                    substitutions += 1
            # If there were any substitutions at this depth, re-parse
            # since this may have revealed new things to substitute
            if substitutions:
                break
            if not all_matches:
                break
        # If there were no substitutions at all, and we have exhausted
        # the possible matches, bail.
        if not substitutions:
            break
    return format_str
def _find_sub_matches(format_str):
"""@brief Find all of the substitution matches.
@param format_str the RUSS conformant format string.
@return Returns an array of depths of arrays of positional matches in input.
"""
depth = 0
matches = []
for pos in range(len(format_str)):
if format_str[pos] == '{':
depth += 1
if not len(matches) == depth:
matches.append([])
matches[depth - 1].append(pos)
continue
if format_str[pos] == '}':
depth -= 1
continue
if not depth == 0:
raise UnbalancedBraces, format_str
return matches
def _build_query_string(query_dict):
"""\
@breif given a dict, return a query string. utility wrapper for urllib.
@param query_dict input query dict
@returns Returns an urlencoded query string including leading '?'.
"""
if query_dict:
keys = query_dict.keys()
keys.sort()
def stringize(value):
if type(value) in (str,unicode):
return value
else:
return str(value)
query_list = [urllib.quote(str(key)) + '=' + urllib.quote(stringize(query_dict[key])) for key in keys]
return '?' + '&'.join(query_list)
else:
return ''
def _fetch_url_directive(directive):
"*FIX: This only supports GET"
commands = directive.split('|')
resource = llsdhttp.get(commands[0])
if len(commands) == 3:
resource = _walk_resource(resource, commands[2])
return resource
def _walk_resource(resource, path):
path = path.split('/')
for child in path:
if not child:
continue
resource = resource[child]
return resource

View File

@@ -1,134 +0,0 @@
"""\
@file servicebuilder.py
@author Phoenix
@brief Class which will generate service urls.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
from indra.base import config
from indra.ipc import llsdhttp
from indra.ipc import russ
# *NOTE: agent presence relies on this variable existing and being current, it is a huge hack
services_config = {}
try:
    services_config = llsdhttp.get(config.get('services-config'))
except Exception:
    # Best-effort: leave services_config empty when the fetch fails, but
    # unlike the old bare 'except:' do not swallow SystemExit or
    # KeyboardInterrupt.
    pass
# Module-level singleton ServiceBuilder instance, created on first use.
_g_builder = None

def _builder():
    """Return the module's singleton ServiceBuilder, creating it lazily."""
    global _g_builder
    if _g_builder is None:
        _g_builder = ServiceBuilder()
    return _g_builder
def build(name, context=None, **kwargs):
    """ Convenience method for using a global, singleton, service builder. Pass arguments either via a dict or via python keyword arguments, or both!

    Example use:
     > context = {'channel':'Second Life Release', 'version':'1.18.2.0'}
     > servicebuilder.build('version-manager-version', context)
       'http://int.util.vaak.lindenlab.com/channel/Second%20Life%20Release/1.18.2.0'
     > servicebuilder.build('version-manager-version', channel='Second Life Release', version='1.18.2.0')
       'http://int.util.vaak.lindenlab.com/channel/Second%20Life%20Release/1.18.2.0'
     > servicebuilder.build('version-manager-version', context, version='1.18.1.2')
       'http://int.util.vaak.lindenlab.com/channel/Second%20Life%20Release/1.18.1.2'
    """
    # None default avoids the shared-mutable-default pitfall of the original
    # 'context={}' signature; delegate to _builder() instead of duplicating
    # its singleton bookkeeping here.
    if context is None:
        context = {}
    return _builder().buildServiceURL(name, context, **kwargs)
def build_path(name, context=None, **kwargs):
    """Build just the path portion of the named service URL.

    @param name The name of the service.
    @param context Optional dict of substitutions (never modified).
    @param kwargs Extra substitutions merged over context.
    """
    # Copy so we don't modify the caller's dictionary; the None default
    # avoids a shared mutable default argument.
    merged = {} if context is None else context.copy()
    merged.update(kwargs)
    return _builder().buildPath(name, merged)
class ServiceBuilder(object):
    """Generates service URLs/paths from a services definition (services.xml)."""

    def __init__(self, services_definition=None):
        """\
        @brief Create a ServiceBuilder.
        @param services_definition Complete services definition, services.xml.
            Defaults to the module-level services_config, resolved lazily at
            call time so callers see the *current* value (see the NOTE on
            services_config) rather than the import-time one.
        """
        if services_definition is None:
            services_definition = services_config
        # no need to keep a copy of the services section of the
        # complete services definition, but it doesn't hurt much.
        self.services = services_definition['services']
        self.builders = {}
        for service in self.services:
            service_builder = service.get('service-builder')
            if not service_builder:
                continue
            if isinstance(service_builder, dict):
                # We will be constructing several builders.
                # items() works on Python 2 and 3; iteritems() is 2-only.
                for name, builder in service_builder.items():
                    full_builder_name = service['name'] + '-' + name
                    self.builders[full_builder_name] = builder
            else:
                self.builders[service['name']] = service_builder

    def buildPath(self, name, context):
        """\
        @brief given the environment on construction, return a service path.
        @param name The name of the service.
        @param context A dict of name value lookups for the service.
        @returns Returns the formatted service path.
        """
        return russ.format(self.builders[name], context)

    def buildServiceURL(self, name, context=None, **kwargs):
        """\
        @brief given the environment on construction, return a service URL.
        @param name The name of the service.
        @param context A dict of name value lookups for the service.
        @param kwargs Any keyword arguments are treated as members of the
            context, so you can write servicebuilder.build('name', param=value).
        @returns Returns the full service URL.
        """
        # Copy so we don't modify the caller's dictionary; the None default
        # avoids a shared mutable default argument.
        context = {} if context is None else context.copy()
        context.update(kwargs)
        base_url = config.get('services-base-url')
        svc_path = russ.format(self.builders[name], context)
        return base_url + svc_path
def on_in(query_name, host_key, schema_key):
    """\
    @brief Constructs an on/in snippet (for running named queries)
        from a schema name and two keys referencing values stored in
        indra.xml.
    @param query_name Name of the query.
    @param host_key Logical name of destination host. Will be
        looked up in indra.xml.
    @param schema_key Logical name of destination schema. Will
        be looked up in indra.xml.
    """
    host = host_key.strip('/')
    schema = schema_key.strip('/')
    query = query_name.lstrip('/')
    return "on/config:%s/in/config:%s/%s" % (host, schema, query)

View File

@@ -1,468 +0,0 @@
"""\
@file siesta.py
@brief A tiny llsd based RESTful web services framework
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
from indra.base import config
from indra.base import llsd
from webob import exc
import webob
import re, socket
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import cjson
json_decode = cjson.decode
json_encode = cjson.encode
JsonDecodeError = cjson.DecodeError
JsonEncodeError = cjson.EncodeError
except ImportError:
import simplejson
json_decode = simplejson.loads
json_encode = simplejson.dumps
JsonDecodeError = ValueError
JsonEncodeError = TypeError
# Map of Content-Type -> parser callable used to decode request/response
# bodies into LLSD values.  'application/xml' is accepted as an alias for
# the LLSD XML serialization.
llsd_parsers = {
    'application/json': json_decode,
    llsd.BINARY_MIME_TYPE: llsd.parse_binary,
    'application/llsd+notation': llsd.parse_notation,
    llsd.XML_MIME_TYPE: llsd.parse_xml,
    'application/xml': llsd.parse_xml,
    }
def mime_type(content_type):
    '''Given a Content-Type header, return only the MIME type.'''
    base, _sep, _params = content_type.partition(';')
    return base.strip().lower()
class BodyLLSD(object):
    '''Give a webob Request or Response an llsd based "content" property.

    Getting the content property parses the body, and caches the result.

    Setting the content property formats a payload, and the body property
    is set.'''

    def _llsd__get(self):
        '''Get, set, or delete the LLSD value stored in this object.'''
        try:
            return self._llsd
        except AttributeError:
            if not self.body:
                raise AttributeError('No llsd attribute has been set')
            else:
                mtype = mime_type(self.content_type)
                try:
                    parser = llsd_parsers[mtype]
                except KeyError:
                    raise exc.HTTPUnsupportedMediaType(
                        'Content type %s not supported' % mtype).exception
                try:
                    self._llsd = parser(self.body)
                # 'except E as err' works on Python 2.6+ and 3; the original
                # 'except E, err' form is Python-2-only.
                except (llsd.LLSDParseError, JsonDecodeError, TypeError) as err:
                    raise exc.HTTPBadRequest(
                        'Could not parse body: %r' % err.args).exception
        return self._llsd

    def _llsd__set(self, val):
        # Prefer the client's Accept preference when this object is a
        # Response tied to a Request; otherwise format per our own type.
        req = getattr(self, 'request', None)
        if req is not None:
            formatter, ctype = formatter_for_request(req)
            self.content_type = ctype
        else:
            formatter, ctype = formatter_for_mime_type(
                mime_type(self.content_type))
        self.body = formatter(val)

    def _llsd__del(self):
        if hasattr(self, '_llsd'):
            del self._llsd

    content = property(_llsd__get, _llsd__set, _llsd__del)
class Response(webob.Response, BodyLLSD):
    '''Response class with LLSD support.

    A sensible default content type is used.

    Setting the llsd property also sets the body. Getting the llsd
    property parses the body if necessary.

    If you set the body property directly, the llsd property will be
    deleted.'''

    default_content_type = 'application/llsd+xml'

    def _body__set(self, body):
        # Setting the raw body invalidates any cached parsed LLSD value.
        if hasattr(self, '_llsd'):
            del self._llsd
        super(Response, self)._body__set(body)

    def cache_forever(self):
        # One year is the de-facto "forever" for HTTP caches.
        self.cache_expires(86400 * 365)

    body = property(webob.Response._body__get, _body__set,
                    webob.Response._body__del,
                    webob.Response._body__get.__doc__)
class Request(webob.Request, BodyLLSD):
    '''Request class with LLSD support.

    Sensible content type and accept headers are used by default.

    Setting the content property also sets the body. Getting the content
    property parses the body if necessary.

    If you set the body property directly, the content property will be
    deleted.'''

    default_content_type = 'application/llsd+xml'
    default_accept = ('application/llsd+xml; q=0.5, '
                      'application/llsd+notation; q=0.3, '
                      'application/llsd+binary; q=0.2, '
                      'application/xml; q=0.1, '
                      'application/json; q=0.0')

    def __init__(self, environ=None, *args, **kwargs):
        # Copy the environ so the defaults we inject don't leak back to
        # the caller's dict.
        if environ is None:
            environ = {}
        else:
            environ = environ.copy()
        if 'CONTENT_TYPE' not in environ:
            environ['CONTENT_TYPE'] = self.default_content_type
        if 'HTTP_ACCEPT' not in environ:
            environ['HTTP_ACCEPT'] = self.default_accept
        super(Request, self).__init__(environ, *args, **kwargs)

    def _body__set(self, body):
        # Setting the raw body invalidates any cached parsed LLSD value.
        if hasattr(self, '_llsd'):
            del self._llsd
        super(Request, self)._body__set(body)

    def path_urljoin(self, *parts):
        '''Join extra path segments onto this request's path URL.'''
        # BUG FIX: the original referenced a bare, undefined name
        # 'path_url' (NameError); the attribute is webob.Request.path_url
        # on self.
        return '/'.join([self.path_url.rstrip('/')] + list(parts))

    body = property(webob.Request._body__get, _body__set,
                    webob.Request._body__del, webob.Request._body__get.__doc__)

    def create_response(self, content=None, status='200 OK',
                        conditional_response=webob.NoDefault):
        resp = self.ResponseClass(status=status, request=self,
                                  conditional_response=conditional_response)
        resp.content = content
        return resp

    def curl(self):
        '''Create and fill out a pycurl easy object from this request.'''
        import pycurl
        c = pycurl.Curl()
        # BUG FIX: webob.Request.url is a property, not a method; the
        # original self.url() would raise TypeError.
        c.setopt(pycurl.URL, self.url)
        if self.headers:
            c.setopt(pycurl.HTTPHEADER,
                     ['%s: %s' % (k, self.headers[k]) for k in self.headers])
        c.setopt(pycurl.FOLLOWLOCATION, True)
        c.setopt(pycurl.AUTOREFERER, True)
        c.setopt(pycurl.MAXREDIRS, 16)
        c.setopt(pycurl.NOSIGNAL, True)
        c.setopt(pycurl.READFUNCTION, self.body_file.read)
        c.setopt(pycurl.SSL_VERIFYHOST, 2)

        if self.method == 'POST':
            c.setopt(pycurl.POST, True)
            post301 = getattr(pycurl, 'POST301', None)
            if post301 is not None:
                # Added in libcurl 7.17.1.
                c.setopt(post301, True)
        elif self.method == 'PUT':
            c.setopt(pycurl.PUT, True)
        elif self.method != 'GET':
            c.setopt(pycurl.CUSTOMREQUEST, self.method)
        return c
# Wire the paired classes together so each can construct its counterpart.
Request.ResponseClass = Response
Response.RequestClass = Request


# Map of MIME type -> serializer used to encode LLSD response bodies.
llsd_formatters = {
    'application/json': json_encode,
    'application/llsd+binary': llsd.format_binary,
    'application/llsd+notation': llsd.format_notation,
    'application/llsd+xml': llsd.format_xml,
    'application/xml': llsd.format_xml,
    }

# Server-side preference order used when matching against the client's
# Accept header (highest quality wins).
formatter_qualities = (
    ('application/llsd+xml', 1.0),
    ('application/llsd+notation', 0.5),
    ('application/llsd+binary', 0.4),
    ('application/xml', 0.3),
    ('application/json', 0.2),
    )
def formatter_for_mime_type(mime_type):
    '''Return a formatter that encodes to the given MIME type.

    The result is a pair of function and MIME type.'''
    formatter = llsd_formatters.get(mime_type)
    if formatter is None:
        raise exc.HTTPInternalServerError(
            'Could not use MIME type %r to format response' %
            mime_type).exception
    return formatter, mime_type
def formatter_for_request(req):
    '''Return a formatter that encodes to the preferred type of the client.

    The result is a pair of function and actual MIME type.'''
    ctype = req.accept.best_match(formatter_qualities)
    formatter = llsd_formatters.get(ctype)
    if formatter is None:
        raise exc.HTTPNotAcceptable().exception
    return formatter, ctype
def wsgi_adapter(func, environ, start_response):
    '''Adapt a Siesta callable to act as a WSGI application.'''
    # Process the request as appropriate.
    try:
        req = Request(environ)
        resp = func(req, **req.urlvars)
        if not isinstance(resp, webob.Response):
            value = resp
            try:
                formatter, ctype = formatter_for_request(req)
                resp = req.ResponseClass(formatter(value), content_type=ctype)
                # BUG FIX: cache the original payload, not the Response
                # object itself (the original did 'resp._llsd = resp' after
                # rebinding resp, so resp.content returned the Response).
                resp._llsd = value
            # 'as err' form works on Python 2.6+ and 3; 'except E, err'
            # is Python-2-only.
            except (JsonEncodeError, TypeError):
                resp = exc.HTTPInternalServerError(
                    detail='Could not format response')
    except exc.HTTPException as e:
        resp = e
    except socket.error as e:
        resp = exc.HTTPInternalServerError(detail=e.args[1])
    return resp(environ, start_response)
def llsd_callable(func):
    '''Turn a callable into a Siesta application.'''

    def wsgi_app(environ, start_response):
        return wsgi_adapter(func, environ, start_response)

    return wsgi_app
def llsd_method(http_method, func):
    '''Wrap func as a Siesta application that only accepts http_method.'''

    def wsgi_app(environ, start_response):
        if environ['REQUEST_METHOD'] != http_method:
            return exc.HTTPMethodNotAllowed()(environ, start_response)
        return wsgi_adapter(func, environ, start_response)

    return wsgi_app
# All HTTP/1.1 request methods, kept sorted so Allow headers are stable.
http11_methods = sorted('OPTIONS GET HEAD POST PUT DELETE TRACE CONNECT'.split())
def llsd_class(cls):
    '''Turn a class into a Siesta application.

    A new instance is created for each request. A HTTP method FOO is
    turned into a call to the handle_foo method of the instance.'''

    def dispatch(req, **kwargs):
        instance = cls()
        method = req.method.lower()
        handler = getattr(instance, 'handle_' + method, None)
        if handler is None:
            allowed = [m for m in http11_methods
                       if hasattr(instance, 'handle_' + m.lower())]
            raise exc.HTTPMethodNotAllowed(
                headers={'Allow': ', '.join(allowed)}).exception
        return handler(req, **kwargs)

    def wsgi_app(environ, start_response):
        return wsgi_adapter(dispatch, environ, start_response)

    return wsgi_app
def curl(reqs):
    """Execute several Request objects concurrently via pycurl.

    @param reqs A sequence of Request objects.
    @returns A list of Response objects, one per request, in order.
    """
    import pycurl
    m = pycurl.CurlMulti()
    curls = [r.curl() for r in reqs]
    io = {}
    for c in curls:
        fp = StringIO()
        hdr = StringIO()
        c.setopt(pycurl.WRITEFUNCTION, fp.write)
        c.setopt(pycurl.HEADERFUNCTION, hdr.write)
        io[id(c)] = fp, hdr
    m.handles = curls
    try:
        while True:
            ret, num_handles = m.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
    finally:
        m.close()

    responses = []
    for req, c in zip(reqs, curls):
        fp, hdr = io[id(c)]
        hdr.seek(0)
        status = hdr.readline().rstrip()
        headers = []
        name, values = None, None
        # XXX We don't currently handle bogus header data.
        for line in hdr.readlines():
            if not line[0].isspace():
                # New header: flush the previous one first.
                if name:
                    headers.append((name, ' '.join(values)))
                name, value = line.strip().split(':', 1)
                # BUG FIX: the original assigned 'value = [value]', leaving
                # 'values' as None so continuation lines and the final
                # flush below crashed.
                values = [value]
            else:
                # Continuation line of a folded header.
                values.append(line.strip())
        if name:
            headers.append((name, ' '.join(values)))
        # BUG FIX: ResponseClass lives on the Request, not the pycurl
        # handle; also collect the responses instead of discarding them
        # (the original built resp and fell off the end of the function).
        responses.append(
            req.ResponseClass(fp.getvalue(), status, headers, request=req))
    return responses
# Matches one {name}, {name:type} or {name~regex} placeholder in a route.
route_re = re.compile(r'''
    \{ # exact character "{"
    (\w*) # "config" or variable (restricted to a-z, 0-9, _)
    (?:([:~])([^}]+))? # optional :type or ~regex part
    \} # exact character "}"
    ''', re.VERBOSE)

# Named regexp shorthands usable as {var:name} in a route.
predefined_regexps = {
    'uuid': r'[a-f0-9][a-f0-9-]{31,35}',
    'int': r'\d+',
    'host': r'[a-z0-9][a-z0-9\-\.]*',
    }

def compile_route(route):
    """Translate a route template into an anchored regexp source string."""
    out = StringIO()
    last_pos = 0
    for match in route_re.finditer(route):
        out.write(re.escape(route[last_pos:match.start()]))
        var_name, sep, expr = match.group(1, 2, 3)
        if var_name == 'config':
            # Config-valued segment: substitute the literal config value.
            expr = re.escape(str(config.get(var_name)))
        else:
            if expr:
                if sep == ':':
                    expr = predefined_regexps[expr]
                # otherwise, treat what follows '~' as a regexp
            else:
                expr = '[^/]+'
            if var_name != '':
                expr = '(?P<%s>%s)' % (var_name, expr)
            else:
                expr = '(%s)' % (expr,)
        out.write(expr)
        last_pos = match.end()
    out.write(re.escape(route[last_pos:]))
    return '^%s$' % out.getvalue()
class Router(object):
    '''WSGI routing class. Parses a URL and hands off a request to
    some other WSGI application. If no suitable application is found,
    responds with a 404.'''

    def __init__(self):
        self._new_routes = []
        self._routes = []
        self._paths = []

    def add(self, route, app, methods=None):
        '''Register app to serve route, optionally restricted to methods.'''
        self._new_routes.append((route, app, methods))

    def _create_routes(self):
        # Compile any routes registered since the last request.
        for route, app, methods in self._new_routes:
            self._paths.append(route)
            self._routes.append(
                (re.compile(compile_route(route)),
                 app,
                 methods and dict.fromkeys(methods)))
        self._new_routes = []

    def __call__(self, environ, start_response):
        # load up the config from the config file. Only needs to be
        # done once per interpreter. This is the entry point of all
        # siesta applications, so this is where we trap it.
        _conf = config.get_config()
        if _conf is None:
            import os.path
            fname = os.path.join(
                environ.get('ll.config_dir', '/local/linden/etc'),
                'indra.xml')
            config.load(fname)

        # proceed with handling the request
        self._create_routes()
        path_info = environ['PATH_INFO']
        request_method = environ['REQUEST_METHOD']
        allowed = []
        for regex, app, methods in self._routes:
            m = regex.match(path_info)
            if m:
                if not methods or request_method in methods:
                    environ['paste.urlvars'] = m.groupdict()
                    return app(environ, start_response)
                else:
                    allowed += methods
        if allowed:
            # BUG FIX: the original referenced an undefined name 'allows'
            # (NameError); also dict-view .sort() breaks on Python 3, so
            # dedupe and sort in one step.
            allowed = sorted(set(allowed))
            resp = exc.HTTPMethodNotAllowed(
                headers={'Allow': ', '.join(allowed)})
        else:
            resp = exc.HTTPNotFound()
        return resp(environ, start_response)

View File

@@ -1,235 +0,0 @@
#!/usr/bin/python
## $LicenseInfo:firstyear=2011&license=viewerlgpl$
## Second Life Viewer Source Code
## Copyright (C) 2011, Linden Research, Inc.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation;
## version 2.1 of the License only.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
## $/LicenseInfo$
from indra.base import llsd, lluuid
from indra.ipc import siesta
import datetime, math, unittest
from webob import exc
class ClassApp(object):
    # Minimal class-style Siesta app: GET returns None, POST echoes the
    # parsed request body back.
    def handle_get(self, req):
        pass

    def handle_post(self, req):
        # NOTE(review): this reads req.llsd, but BodyLLSD publishes the
        # parsed-body property as 'content' -- confirm which attribute
        # name siesta actually exposes.
        return req.llsd
def callable_app(req):
    # Minimal function-style Siesta app mirroring ClassApp's behavior.
    if req.method == 'UNDERPANTS':
        # Deliberately bogus method used to exercise 405 handling.
        raise exc.HTTPMethodNotAllowed()
    elif req.method == 'GET':
        return None
    return req.llsd
class TestBase:
    """Shared Siesta round-trip tests; subclasses supply self.server."""

    def test_basic_get(self):
        req = siesta.Request.blank('/')
        self.assertEquals(req.get_response(self.server).body,
                          llsd.format_xml(None))

    def test_bad_method(self):
        req = siesta.Request.blank('/')
        req.environ['REQUEST_METHOD'] = 'UNDERPANTS'
        self.assertEquals(req.get_response(self.server).status_int,
                          exc.HTTPMethodNotAllowed.code)

    # Values that survive a JSON round trip unchanged.
    json_safe = {
        'none': None,
        'bool_true': True,
        'bool_false': False,
        'int_zero': 0,
        'int_max': 2147483647,
        'int_min': -2147483648,
        'long_zero': 0,
        'long_max': 2147483647L,
        'long_min': -2147483648L,
        'float_zero': 0,
        'float': math.pi,
        'float_huge': 3.14159265358979323846e299,
        'str_empty': '',
        'str': 'foo',
        u'unic\u1e51de_empty': u'',
        u'unic\u1e51de': u'\u1e4exx\u10480',
        }
    json_safe['array'] = json_safe.values()
    json_safe['tuple'] = tuple(json_safe.values())
    json_safe['dict'] = json_safe.copy()

    # Values that need LLSD (not plain JSON) to round-trip with fidelity.
    json_unsafe = {
        'uuid_empty': lluuid.UUID(),
        'uuid_full': lluuid.UUID('dc61ab0530200d7554d23510559102c1a98aab1b'),
        'binary_empty': llsd.binary(),
        'binary': llsd.binary('f\0\xff'),
        'uri_empty': llsd.uri(),
        'uri': llsd.uri('http://www.secondlife.com/'),
        'datetime_empty': datetime.datetime(1970,1,1),
        'datetime': datetime.datetime(1999,9,9,9,9,9),
        }
    json_unsafe.update(json_safe)
    json_unsafe['array'] = json_unsafe.values()
    json_unsafe['tuple'] = tuple(json_unsafe.values())
    json_unsafe['dict'] = json_unsafe.copy()
    json_unsafe['iter'] = iter(json_unsafe.values())

    def _test_client_content_type_good(self, content_type, ll):
        # POST each payload with the given content type and expect 200.
        def run(ll):
            req = siesta.Request.blank('/')
            req.environ['REQUEST_METHOD'] = 'POST'
            req.content_type = content_type
            req.llsd = ll
            req.accept = content_type
            resp = req.get_response(self.server)
            self.assertEquals(resp.status_int, 200)
            return req, resp
        # Disabled value-fidelity comparison, kept for reference.
        if False and isinstance(ll, dict):
            def fixup(v):
                if isinstance(v, float):
                    return '%.5f' % v
                if isinstance(v, long):
                    return int(v)
                if isinstance(v, (llsd.binary, llsd.uri)):
                    return v
                if isinstance(v, (tuple, list)):
                    return [fixup(i) for i in v]
                if isinstance(v, dict):
                    return dict([(k, fixup(i)) for k, i in v.iteritems()])
                return v
            for k, v in ll.iteritems():
                l = [k, v]
                req, resp = run(l)
                self.assertEquals(fixup(resp.llsd), fixup(l))
        run(ll)

    def test_client_content_type_json_good(self):
        self._test_client_content_type_good('application/json', self.json_safe)

    def test_client_content_type_llsd_xml_good(self):
        self._test_client_content_type_good('application/llsd+xml',
                                            self.json_unsafe)

    def test_client_content_type_llsd_notation_good(self):
        self._test_client_content_type_good('application/llsd+notation',
                                            self.json_unsafe)

    def test_client_content_type_llsd_binary_good(self):
        self._test_client_content_type_good('application/llsd+binary',
                                            self.json_unsafe)

    def test_client_content_type_xml_good(self):
        self._test_client_content_type_good('application/xml',
                                            self.json_unsafe)

    def _test_client_content_type_bad(self, content_type):
        # Unparseable body under any encoding must yield a 400.
        req = siesta.Request.blank('/')
        req.environ['REQUEST_METHOD'] = 'POST'
        req.body = '\0invalid nonsense under all encodings'
        req.content_type = content_type
        self.assertEquals(req.get_response(self.server).status_int,
                          exc.HTTPBadRequest.code)

    def test_client_content_type_json_bad(self):
        self._test_client_content_type_bad('application/json')

    def test_client_content_type_llsd_xml_bad(self):
        self._test_client_content_type_bad('application/llsd+xml')

    def test_client_content_type_llsd_notation_bad(self):
        self._test_client_content_type_bad('application/llsd+notation')

    def test_client_content_type_llsd_binary_bad(self):
        self._test_client_content_type_bad('application/llsd+binary')

    def test_client_content_type_xml_bad(self):
        self._test_client_content_type_bad('application/xml')

    def test_client_content_type_bad(self):
        # Unknown content type must yield 415 Unsupported Media Type.
        req = siesta.Request.blank('/')
        req.environ['REQUEST_METHOD'] = 'POST'
        req.body = 'XXX'
        req.content_type = 'application/nonsense'
        self.assertEquals(req.get_response(self.server).status_int,
                          exc.HTTPUnsupportedMediaType.code)

    def test_request_default_content_type(self):
        req = siesta.Request.blank('/')
        self.assertEquals(req.content_type, req.default_content_type)

    def test_request_default_accept(self):
        req = siesta.Request.blank('/')
        from webob import acceptparse
        self.assertEquals(str(req.accept).replace(' ', ''),
                          req.default_accept.replace(' ', ''))

    def test_request_llsd_auto_body(self):
        # Setting the llsd property must serialize into the body.
        req = siesta.Request.blank('/')
        req.llsd = {'a': 2}
        self.assertEquals(req.body, '<?xml version="1.0" ?><llsd><map>'
                                    '<key>a</key><integer>2</integer></map></llsd>')

    def test_request_llsd_mod_body_changes_llsd(self):
        # Overwriting the body must invalidate the cached llsd value.
        req = siesta.Request.blank('/')
        req.llsd = {'a': 2}
        req.body = '<?xml version="1.0" ?><llsd><integer>1337</integer></llsd>'
        self.assertEquals(req.llsd, 1337)

    def test_request_bad_llsd_fails(self):
        # Unserializable payloads must raise TypeError for every parser type.
        def crashme(ctype):
            def boom():
                class foo(object): pass
                req = siesta.Request.blank('/')
                req.content_type = ctype
                req.llsd = foo()
        for mime_type in siesta.llsd_parsers:
            self.assertRaises(TypeError, crashme(mime_type))
class ClassServer(TestBase, unittest.TestCase):
    # Runs the shared TestBase suite against the class-based app.
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        self.server = siesta.llsd_class(ClassApp)
class CallableServer(TestBase, unittest.TestCase):
    # Runs the shared TestBase suite against the callable-based app.
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        self.server = siesta.llsd_callable(callable_app)
class RouterServer(unittest.TestCase):
    def test_router(self):
        # A route with an :int-typed placeholder: numeric paths match,
        # non-numeric paths fall through to 404.
        def foo(req, quux):
            print quux
        r = siesta.Router()
        r.add('/foo/{quux:int}', siesta.llsd_callable(foo), methods=['GET'])

        req = siesta.Request.blank('/foo/33')
        req.get_response(r)

        req = siesta.Request.blank('/foo/bar')
        self.assertEquals(req.get_response(r).status_int,
                          exc.HTTPNotFound.code)
# Script entry point: run every unittest.TestCase in this module.
if __name__ == '__main__':
    unittest.main()

View File

@@ -1,597 +0,0 @@
"""
@file webdav.py
@brief Classes to make manipulation of a webdav store easier.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import sys, os, httplib, urlparse
import socket, time
import xml.dom.minidom
import syslog
# import signal
# Module revision tag (historical; not derived from version control).
__revision__ = '0'

# When True, WebDAV.log() emits syslog trace lines for each operation.
dav_debug = False
# def urlsafe_b64decode (enc):
# return base64.decodestring (enc.replace ('_', '/').replace ('-', '+'))
# def urlsafe_b64encode (str):
# return base64.encodestring (str).replace ('+', '-').replace ('/', '_')
class DAVError (Exception):
    """ Base class for exceptions in this module.

    Carries the HTTP status, reason message, response body and extra
    request details of a failed WebDAV operation.
    """
    def __init__ (self, status=0, message='', body='', details=''):
        self.status = status
        self.message = message
        self.body = body
        self.details = details
        Exception.__init__ (self, '%d:%s:%s%s' % (self.status, self.message,
                                                  self.body, self.details))
    def print_to_stderr (self):
        """ Write a short status line and the details to stderr. """
        # sys.stderr.write works on Python 2 and 3; the original used the
        # Python-2-only 'print >> sys.stderr' statement.
        sys.stderr.write (str (self.status) + ' ' + self.message + '\n')
        sys.stderr.write (str (self.details) + '\n')
class Timeout (Exception):
    """ Raised when a WebDAV operation exceeds its allotted time. """
    def __init__ (self, arg=''):
        Exception.__init__ (self, arg)


def alarm_handler (signum, frame):
    """ signal handler: convert a SIGALRM delivery into a Timeout. """
    raise Timeout ('caught alarm')
class WebDAV:
""" WebDAV docstring """
    def __init__ (self, url, proxy=None, retries_before_fail=6):
        """Create a WebDAV client for the store at url.

        @param url base URL of the WebDAV store; its path becomes top_path.
        @param proxy optional proxy URL; when set, requests go to the proxy
            host/port with a Host header naming the real server.
        @param retries_before_fail number of reconnect/retry attempts made
            by request() before raising DAVError.
        """
        self.init_url = url
        self.init_proxy = proxy
        self.retries_before_fail = retries_before_fail
        url_parsed = urlparse.urlsplit (url)
        self.top_path = url_parsed[ 2 ]
        # make sure top_path has a trailing /
        if self.top_path == None or self.top_path == '':
            self.top_path = '/'
        elif len (self.top_path) > 1 and self.top_path[-1:] != '/':
            self.top_path += '/'
        if dav_debug:
            syslog.syslog ('new WebDAV %s : %s' % (str (url), str (proxy)))
        if proxy:
            # Connect to the proxy; remember the real host for Host headers.
            proxy_parsed = urlparse.urlsplit (proxy)
            self.host_header = url_parsed[ 1 ]
            host_and_port = proxy_parsed[ 1 ].split (':')
            self.host = host_and_port[ 0 ]
            if len (host_and_port) > 1:
                self.port = int(host_and_port[ 1 ])
            else:
                self.port = 80
        else: # no proxy
            host_and_port = url_parsed[ 1 ].split (':')
            self.host_header = None
            self.host = host_and_port[ 0 ]
            if len (host_and_port) > 1:
                self.port = int(host_and_port[ 1 ])
            else:
                self.port = 80
        self.connection = False
        self.connect ()
    def log (self, msg, depth=0):
        """Emit a syslog trace line when dav_debug is on.

        Well-known Linden host/proxy URLs are abbreviated to short aliases
        to keep the log lines readable.
        """
        if dav_debug and depth == 0:
            host = str (self.init_url)
            if host == 'http://int.tuco.lindenlab.com:80/asset/':
                host = 'tuco'
            if host == 'http://harriet.lindenlab.com/asset-keep/':
                host = 'harriet/asset-keep'
            if host == 'http://harriet.lindenlab.com/asset-flag/':
                host = 'harriet/asset-flag'
            if host == 'http://harriet.lindenlab.com/asset/':
                host = 'harriet/asset'
            if host == 'http://ozzy.lindenlab.com/asset/':
                host = 'ozzy/asset'
            if host == 'http://station11.lindenlab.com:12041/:':
                host = 'station11:12041'
            proxy = str (self.init_proxy)
            if proxy == 'None':
                proxy = ''
            if proxy == 'http://int.tuco.lindenlab.com:3128/':
                proxy = 'tuco'
            syslog.syslog ('WebDAV (%s:%s) %s' % (host, proxy, str (msg)))
    def connect (self):
        """(Re)open the HTTP connection to the configured host and port."""
        self.log ('connect')
        self.connection = httplib.HTTPConnection (self.host, self.port)
    def __err (self, response, details):
        """Consume response and raise a DAVError describing the failure."""
        raise DAVError (response.status, response.reason, response.read (),
                        str (self.init_url) + ':' + \
                        str (self.init_proxy) + ':' + str (details))
    def request (self, method, path, body=None, headers=None,
                 read_all=True, body_hook = None, recurse=0, allow_cache=True):
        """Issue one HTTP request against the store, retrying on failure.

        @param method HTTP verb; 'purge' is special-cased to use path as-is.
        @param path path below top_path (a leading '/' is stripped).
        @param body optional request body.
        @param headers optional dict of extra headers (mutated in place).
        @param read_all when True, drain the response body before returning.
        @param body_hook optional callable invoked right after sending.
        @param recurse current retry depth; internal, callers leave it at 0.
        @param allow_cache when False, send Pragma/cache-control no-cache.
        @return the httplib response object.
        @raise DAVError when reconnect retries are exhausted.
        """
        # self.log ('request %s %s' % (method, path))
        if headers == None:
            headers = {}
        if not allow_cache:
            headers['Pragma'] = 'no-cache'
            headers['cache-control'] = 'no-cache'
        try:
            if method.lower () != 'purge':
                if path.startswith ('/'):
                    path = path[1:]
                if self.host_header: # use proxy
                    headers[ 'host' ] = self.host_header
                    fullpath = 'http://%s%s%s' % (self.host_header,
                                                  self.top_path, path)
                else: # no proxy
                    fullpath = self.top_path + path
            else:
                # PURGE targets the path exactly as given.
                fullpath = path

            self.connection.request (method, fullpath, body, headers)
            if body_hook:
                body_hook ()

            # Disabled alarm-based timeout wrapper, kept for reference:
            # signal.signal (signal.SIGALRM, alarm_handler)
            # try:
            #     signal.alarm (120)
            #     signal.alarm (0)
            # except Timeout, e:
            #     if recurse < 6:
            #         return self.retry_request (method, path, body, headers,
            #                                    read_all, body_hook, recurse)
            #     else:
            #         raise DAVError (0, 'timeout', self.host,
            #                         (method, path, body, headers, recurse))

            response = self.connection.getresponse ()
            if read_all:
                # Drain the body so the connection can be reused.
                while len (response.read (1024)) > 0:
                    pass
            # Retry 500/503/403 responses up to retries_before_fail times.
            if (response.status == 500 or \
                response.status == 503 or \
                response.status == 403) and \
                recurse < self.retries_before_fail:
                return self.retry_request (method, path, body, headers,
                                           read_all, body_hook, recurse)
            return response
        except (httplib.ResponseNotReady,
                httplib.BadStatusLine,
                socket.error):
            # if the server hangs up on us (keepalive off, broken pipe),
            # we need to reconnect and try again.
            if recurse < self.retries_before_fail:
                return self.retry_request (method, path, body, headers,
                                           read_all, body_hook, recurse)
            raise DAVError (0, 'reconnect failed', self.host,
                            (method, path, body, headers, recurse))
def retry_request (self, method, path, body, headers,
                   read_all, body_hook, recurse):
    """ Linear back-off (10s per attempt), reconnect, and reissue the
    request with an incremented retry depth. """
    time.sleep (recurse * 10.0)
    self.connect ()
    return self.request (method, path, body, headers,
                         read_all, body_hook, recurse + 1)
def propfind (self, path, body=None, depth=1):
    """ Issue a WebDAV PROPFIND at the given depth; return the
    207 Multi-Status response, or raise DAVError otherwise. """
    # self.log ('propfind %s' % path)
    hdrs = {'Content-Type': 'text/xml; charset="utf-8"',
            'Depth': str (depth)}
    resp = self.request ('PROPFIND', path, body, hdrs, False)
    if resp.status != 207:
        self.__err (resp, ('PROPFIND', path, body, hdrs, 0))
    return resp  # Multi-Status
def purge (self, path):
    """ Issue a squid PURGE command.  200 (purged) and 404 (was not
    cached) both count as success. """
    hdrs = {'Accept': '*/*'}
    resp = self.request ('PURGE', path, None, hdrs)
    if resp.status not in (200, 404):
        self.__err (resp, ('PURGE', path, None, hdrs))
    return resp
def get_file_size (self, path):
    """
    Use PROPFIND to ask a webdav server what the size of a file is
    (the DAV: getcontentlength property).  If used on a directory
    (collection), or the property is missing, return 0.
    """
    self.log ('get_file_size %s' % path)
    # "getcontentlength" property
    # 8.1.1 Example - Retrieving Named Properties
    # http://docs.python.org/lib/module-xml.dom.html
    nsurl = 'http://apache.org/dav/props/'
    doc = xml.dom.minidom.Document ()
    propfind_element = doc.createElementNS (nsurl, "D:propfind")
    propfind_element.setAttributeNS (nsurl, 'xmlns:D', 'DAV:')
    doc.appendChild (propfind_element)
    prop_element = doc.createElementNS (nsurl, "D:prop")
    propfind_element.appendChild (prop_element)
    con_len_element = doc.createElementNS (nsurl, "D:getcontentlength")
    prop_element.appendChild (con_len_element)
    response = self.propfind (path, doc.toxml ())
    doc.unlink ()
    resp_doc = xml.dom.minidom.parseString (response.read ())
    try:
        # IndexError here covers both a missing getcontentlength
        # element and one with no text child (collections)
        cln = resp_doc.getElementsByTagNameNS ('DAV:',
                                               'getcontentlength')[ 0 ]
        content_length = int (cln.childNodes[ 0 ].nodeValue)
    except IndexError:
        content_length = 0
    finally:
        # previously leaked on the early-return path; always unlink
        resp_doc.unlink ()
    return content_length
def file_exists (self, path):
    """
    HEAD the given file; return True only when the server answers 200
    and did not substitute a gzipped variant for a plain request.
    """
    self.log ('file_exists %s' % path)
    expect_gzip = path.endswith ('.gz')
    response = self.request ('HEAD', path)
    encoding = response.getheader ('Content-Encoding', '').strip ()
    if encoding.lower () == 'x-gzip' and not expect_gzip:
        # the asset server fakes us out if we ask for the non-gzipped
        # version of an asset, but the server has the gzipped version.
        return False
    return response.status == 200
def mkdir (self, path):
    """ MKCOL the given path.  201 means created; 405 is tolerated
    (the collection probably already existed). """
    self.log ('mkdir %s' % path)
    headers = {}
    response = self.request ('MKCOL', path, None, headers)
    if response.status not in (201, 405):
        self.__err (response, ('MKCOL', path, None, headers, 0))
def delete (self, path):
    """ DELETE the given path.  Depth: infinity is required for
    collections.  204 (deleted) and 404 (already gone) are OK. """
    self.log ('delete %s' % path)
    headers = {'Depth':'infinity'} # collections require infinity
    response = self.request ('DELETE', path, None, headers)
    if response.status not in (204, 404):
        self.__err (response, ('DELETE', path, None, headers, 0))
def list_directory (self, path, dir_filter=None, allow_cache=True,
                    minimum_cache_time=False):
    """
    Request an http directory listing and parse the filenames out of lines
    like: '<LI><A HREF="X"> X</A>'.  If a filter function is provided,
    only return filenames that the filter returns True for.
    This is sort of grody, but it seems faster than other ways of getting
    this information from an isilon.
    """
    self.log ('list_directory %s' % path)
    def try_match (lline, before, after):
        """ Find before/after delimiters in the lowercased lline, but
        slice the asset name out of `line` (closure over the loop
        variable below) so the returned name keeps its original case. """
        try:
            blen = len (before)
            asset_start_index = lline.index (before)
            asset_end_index = lline.index (after, asset_start_index + blen)
            # note: deliberately indexes `line`, not the lline parameter
            asset = line[ asset_start_index + blen : asset_end_index ]
            if not dir_filter or dir_filter (asset):
                return [ asset ]
            return []
        except ValueError:
            # one of the delimiters was absent: not a listing line
            return []
    if len (path) > 0 and path[-1:] != '/':
        path += '/'
    response = self.request ('GET', path, None, {}, False,
                             allow_cache=allow_cache)
    if allow_cache and minimum_cache_time: # XXX
        # debugging aid only; minimum_cache_time is never otherwise used
        print response.getheader ('Date')
        # s = "2005-12-06T12:13:14"
        # from datetime import datetime
        # from time import strptime
        # datetime(*strptime(s, "%Y-%m-%dT%H:%M:%S")[0:6])
        # datetime.datetime(2005, 12, 6, 12, 13, 14)
    if response.status != 200:
        self.__err (response, ('GET', path, None, {}, 0))
    assets = []
    for line in response.read ().split ('\n'):
        lline = line.lower ()
        if lline.find ("parent directory") == -1:
            # isilon file
            assets += try_match (lline, '<li><a href="', '"> ')
            # apache dir
            assets += try_match (lline, 'alt="[dir]"> <a href="', '/">')
            # apache file
            assets += try_match (lline, 'alt="[ ]"> <a href="', '">')
    return assets
def __tmp_filename (self, path_and_file):
    """ Build a hidden, pid-suffixed temporary name next to
    path_and_file (e.g. dir/.name.1234). """
    head, tail = os.path.split (path_and_file)
    sep = '/' if head != '' else ''
    return head + sep + '.' + tail + '.' + str (os.getpid ())
def __put__ (self, filesize, body_hook, remotefile):
    """ Core upload: PUT filesize bytes (streamed by body_hook) to a
    temporary name, verify the stored size, rename into place, then
    verify the final size.  Raises DAVError on any failure. """
    headers = {'Content-Length' : str (filesize)}
    remotefile_tmp = self.__tmp_filename (remotefile)
    response = self.request ('PUT', remotefile_tmp, None,
                             headers, True, body_hook)
    if not response.status in (201, 204): # created, no content
        self.__err (response, ('PUT', remotefile, None, headers, 0))
    # verify the tmp upload really landed with the expected size
    if filesize != self.get_file_size (remotefile_tmp):
        try:
            self.delete (remotefile_tmp)
        except:
            # best-effort cleanup; the size error below is what matters
            pass
        raise DAVError (0, 'tmp upload error', remotefile_tmp)
    # move the file to its final location
    try:
        self.rename (remotefile_tmp, remotefile)
    except DAVError, exc:
        if exc.status == 403: # try to clean up the tmp file
            try:
                self.delete (remotefile_tmp)
            except:
                pass
        raise
    if filesize != self.get_file_size (remotefile):
        raise DAVError (0, 'file upload error', str (remotefile_tmp))
def put_string (self, strng, remotefile):
    """ Upload the contents of strng to remotefile (tmp-name upload,
    size-checked, then renamed into place — see __put__). """
    size = len (strng)
    self.log ('put_string %d -> %s' % (size, remotefile))
    def send_body ():
        """ stream the whole string over the open connection """
        self.connection.send (strng)
    self.__put__ (size, send_body, remotefile)
def put_file (self, localfile, remotefile):
    """
    Send a local file to a remote webdav store.  First, upload to
    a temporary filename.  Next make sure the file is the size we
    expected.  Next, move the file to its final location.  Next,
    check the file size at the final location.  (All handled by
    __put__; this method just streams the bytes.)
    """
    self.log ('put_file %s -> %s' % (localfile, remotefile))
    filesize = os.path.getsize (localfile)
    def body_hook ():
        """ stream the file in 1300-byte chunks """
        # 'rb': binary mode so Windows does not translate line endings,
        # keeping the bytes sent equal to os.path.getsize(); try/finally
        # so the handle is closed even if send() raises mid-upload
        handle = open (localfile, 'rb')
        try:
            while True:
                data = handle.read (1300)
                if len (data) == 0:
                    break
                self.connection.send (data)
        finally:
            handle.close ()
    self.__put__ (filesize, body_hook, remotefile)
def create_empty_file (self, remotefile):
    """ PUT a zero-length body at remotefile, then verify via PROPFIND
    that the stored file really is empty. """
    self.log ('touch_file %s' % (remotefile))
    zero_headers = {'Content-Length' : '0'}
    response = self.request ('PUT', remotefile, None, zero_headers)
    if response.status not in (201, 204): # created, no content
        self.__err (response, ('PUT', remotefile, None, zero_headers, 0))
    if self.get_file_size (remotefile) != 0:
        raise DAVError (0, 'file upload error', str (remotefile))
def __get_file_setup (self, remotefile, check_size=True):
    """ Begin a GET of remotefile.  Returns (response, content_length)
    where content_length may be None if the server sent no header.
    When check_size is set, the Content-Length must match the size
    reported by PROPFIND, else DAVError is raised. """
    expected_size = self.get_file_size (remotefile) if check_size else None
    response = self.request ('GET', remotefile, None, {}, False)
    if response.status != 200:
        self.__err (response, ('GET', remotefile, None, {}, 0))
    try:
        content_length = int (response.getheader ("Content-Length"))
    except TypeError:
        # header absent: getheader returned None
        content_length = None
    if check_size and content_length != expected_size:
        raise DAVError (0, 'file DL size error', remotefile)
    return (response, content_length)
def __get_file_read (self, writehandle, response, content_length):
    """ Copy the response body into writehandle.  With a known
    content_length, exactly that many bytes are required (short reads
    raise DAVError); without one, read until EOF. """
    if content_length is None:
        # unknown length: stream until the server signals EOF
        while True:
            chunk = response.read ()
            if (len (chunk) < 1):
                break
            writehandle.write (chunk)
        return
    received = 0
    while received < content_length:
        chunk = response.read (content_length - received)
        if len (chunk) == 0:
            raise DAVError (0, 'short file download')
        received += len (chunk)
        writehandle.write (chunk)
    # drain any trailing bytes so the keepalive socket stays reusable
    while len (response.read ()) > 0:
        pass
def get_file (self, remotefile, localfile, check_size=True):
    """
    Get a remote file from a webdav server.  Download to a local
    tmp file, then move into place.  Sanity check file sizes as
    we go.
    """
    self.log ('get_file %s -> %s' % (remotefile, localfile))
    (response, content_length) = \
        self.__get_file_setup (remotefile, check_size)
    localfile_tmp = self.__tmp_filename (localfile)
    # 'wb': binary mode so the on-disk byte count matches what was
    # downloaded (text mode would mangle it on Windows); try/finally
    # so the handle is closed even if the read loop raises
    handle = open (localfile_tmp, 'wb')
    try:
        self.__get_file_read (handle, response, content_length)
    finally:
        handle.close ()
    if check_size:
        if content_length != os.path.getsize (localfile_tmp):
            raise DAVError (0, 'file DL size error',
                            remotefile+','+localfile)
    os.rename (localfile_tmp, localfile)
def get_file_as_string (self, remotefile, check_size=True):
    """
    Download a file from a webdav server and return it as a string.
    """
    self.log ('get_file_as_string %s' % remotefile)
    (response, content_length) = \
        self.__get_file_setup (remotefile, check_size)
    # os.tmpfile() was removed in Python 3; tempfile.TemporaryFile()
    # is the portable equivalent and also self-deletes on close
    import tempfile
    tmp_handle = tempfile.TemporaryFile ()
    try:
        self.__get_file_read (tmp_handle, response, content_length)
        tmp_handle.seek (0)
        ret = tmp_handle.read ()
    finally:
        tmp_handle.close ()
    return ret
def get_post_as_string (self, remotefile, body):
    """
    Do an http POST, send body, get response and return it.
    """
    self.log ('get_post_as_string %s' % remotefile)
    # headers = {'Content-Type':'application/x-www-form-urlencoded'}
    headers = {'Content-Type':'text/xml; charset="utf-8"'}
    # b64body = urlsafe_b64encode (asset_url)
    response = self.request ('POST', remotefile, body, headers, False)
    if response.status != 200:
        self.__err (response, ('POST', remotefile, body, headers, 0))
    try:
        content_length = int (response.getheader ('Content-Length'))
    except TypeError:
        # no Content-Length header: read until EOF
        content_length = None
    # os.tmpfile() was removed in Python 3; tempfile.TemporaryFile()
    # is the portable equivalent and also self-deletes on close
    import tempfile
    tmp_handle = tempfile.TemporaryFile ()
    try:
        self.__get_file_read (tmp_handle, response, content_length)
        tmp_handle.seek (0)
        ret = tmp_handle.read ()
    finally:
        tmp_handle.close ()
    return ret
def __destination_command (self, verb, remotesrc, dstdav, remotedst):
"""
self and dstdav should point to the same http server.
"""
if len (remotedst) > 0 and remotedst[ 0 ] == '/':
remotedst = remotedst[1:]
headers = {'Destination': 'http://%s:%d%s%s' % (dstdav.host,
dstdav.port,
dstdav.top_path,
remotedst)}
response = self.request (verb, remotesrc, None, headers)
if response.status == 201:
return # created
if response.status == 204:
return # no content
self.__err (response, (verb, remotesrc, None, headers, 0))
def rename (self, remotesrc, remotedst):
    """ MOVE remotesrc to remotedst on this same webdav server. """
    self.log ('rename %s -> %s' % (remotesrc, remotedst))
    self.__destination_command ('MOVE', remotesrc, self, remotedst)
def xrename (self, remotesrc, dstdav, remotedst):
    """ MOVE remotesrc to remotedst expressed relative to dstdav
    (which must be the same physical server). """
    self.log ('xrename %s -> %s' % (remotesrc, remotedst))
    self.__destination_command ('MOVE', remotesrc, dstdav, remotedst)
def copy (self, remotesrc, remotedst):
    """ COPY remotesrc to remotedst on this same webdav server. """
    self.log ('copy %s -> %s' % (remotesrc, remotedst))
    self.__destination_command ('COPY', remotesrc, self, remotedst)
def xcopy (self, remotesrc, dstdav, remotedst):
    """ COPY remotesrc to remotedst expressed relative to dstdav
    (which must be the same physical server). """
    self.log ('xcopy %s -> %s' % (remotesrc, remotedst))
    self.__destination_command ('COPY', remotesrc, dstdav, remotedst)
def put_string (data, url):
    """
    Upload the string data to the given webdav url.
    """
    scheme, netloc, upload_path = urlparse.urlsplit (url)[:3]
    dav = WebDAV ('%s://%s/' % (scheme, netloc))
    dav.put_string (data, upload_path)
def get_string (url, check_size=True):
    """
    Return the contents of the given webdav url as a string.
    """
    scheme, netloc, remote_path = urlparse.urlsplit (url)[:3]
    dav = WebDAV ('%s://%s/' % (scheme, netloc))
    return dav.get_file_as_string (remote_path, check_size)

View File

@@ -1,273 +0,0 @@
"""\
@file xml_rpc.py
@brief An implementation of a parser/generator for the XML-RPC xml format.
$LicenseInfo:firstyear=2006&license=mit$
Copyright (c) 2006-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
from greenlet import greenlet
from mulib import mu
from xml.sax import handler
from xml.sax import parseString
# States
class Expected(object):
    """ A parser state expecting one particular tag name.  Attribute
    access builds a new state of the same class for that attribute's
    name, so e.g. START.methodcall means 'expect <methodCall>'. """
    def __init__(self, tag):
        self.tag = tag
    def __getattr__(self, name):
        # only fires for attributes not already set (i.e. not 'tag')
        return type(self)(name)
    def __repr__(self):
        cls_name = type(self).__name__
        return '%s(%r)' % (cls_name, self.tag)
class START(Expected):
    """ State expecting an element-start event for self.tag. """
    pass
class END(Expected):
    """ State expecting an element-end event for self.tag. """
    pass
class STR(object):
    """ State expecting character data; empty tag matches anything. """
    tag = ''
# Deliberately shadow the classes with wildcard instances: an empty
# tag matches any element name in XMLParser.assertState, and instances
# (not classes) are what type() comparisons there expect.
START = START('')
END = END('')
class Malformed(Exception):
    """ Raised when the document deviates from the expected states. """
    pass
class XMLParser(handler.ContentHandler):
    """ SAX handler that feeds start/character/end events into a
    greenlet-based state machine (see parse()).  After each event the
    machine switches back with the state(s) it will accept next, and
    assertState() enforces that expectation. """
    def __init__(self, state_machine, next_states):
        handler.ContentHandler.__init__(self)
        # the greenlet running the xml_rpc()/handle() walker
        self.state_machine = state_machine
        if not isinstance(next_states, tuple):
            next_states = (next_states, )
        self.next_states = next_states
        # accumulates character data between element boundaries
        self._character_buffer = ''
    def assertState(self, state, name, *rest):
        """ Raise Malformed unless (state, name) matches one of the
        currently-expected next states. """
        if not isinstance(self.next_states, tuple):
            self.next_states = (self.next_states, )
        for next in self.next_states:
            if type(state) == type(next):
                # an expected state with an empty tag matches any name
                if next.tag and next.tag != name:
                    raise Malformed(
                        "Expected %s, got %s %s %s" % (
                            next, state, name, rest))
                break
        else:
            # no expected state was even of the right kind
            raise Malformed(
                "Expected %s, got %s %s %s" % (
                    self.next_states, state, name, rest))
    def startElement(self, name, attrs):
        self.assertState(START, name.lower(), attrs)
        self.next_states = self.state_machine.switch(START, (name.lower(), dict(attrs)))
    def endElement(self, name):
        # flush buffered character data as a STR event first
        if self._character_buffer.strip():
            characters = self._character_buffer.strip()
            self._character_buffer = ''
            self.assertState(STR, characters)
            self.next_states = self.state_machine.switch(characters)
        self.assertState(END, name.lower())
        self.next_states = self.state_machine.switch(END, name.lower())
    def error(self, exc):
        # record a recoverable parse error (feedparser-style bozo flag)
        self.bozo = 1
        self.exc = exc
    def fatalError(self, exc):
        self.error(exc)
        raise exc
    def characters(self, characters):
        self._character_buffer += characters
def parse(what):
    """ Parse an XML-RPC <methodCall> document.  Returns the
    (methodName, params) tuple produced by xml_rpc(). """
    # run the walker in its own greenlet; it yields back the states
    # it expects next after each SAX event
    child = greenlet(xml_rpc)
    me = greenlet.getcurrent()
    startup_states = child.switch(me)
    parser = XMLParser(child, startup_states)
    try:
        parseString(what, parser)
    except Malformed:
        # dump the offending document before propagating
        print what
        raise
    # final switch collects the walker's return value
    return child.switch()
def xml_rpc(yielder):
    """ Greenlet body: walks a <methodCall> document by switching out
    the states it expects next, returning (methodName, params). """
    yielder.switch(START.methodcall)
    yielder.switch(START.methodname)
    methodName = yielder.switch(STR)
    yielder.switch(END.methodname)
    yielder.switch(START.params)
    root = None  # unused; kept as-is
    params = []
    while True:
        # either another <param> or the closing </params>
        state, _ = yielder.switch(START.param, END.params)
        if state == END:
            break
        yielder.switch(START.value)
        params.append(
            handle(yielder))
        yielder.switch(END.value)
        yielder.switch(END.param)
    yielder.switch(END.methodcall)
    ## Resume parse
    yielder.switch()
    ## Return result to parse
    return methodName.strip(), params
def handle(yielder):
    """ Consume one XML-RPC <value> payload from the event stream and
    return the corresponding python object (recursing for structs and
    arrays). """
    # this module never imported base64 at top level, so <base64>
    # payloads raised NameError before; import it locally here
    import base64
    _, (tag, attrs) = yielder.switch(START)
    if tag in ['int', 'i4']:
        result = int(yielder.switch(STR))
    elif tag == 'boolean':
        result = bool(int(yielder.switch(STR)))
    elif tag == 'string':
        result = yielder.switch(STR)
    elif tag == 'double':
        result = float(yielder.switch(STR))
    elif tag == 'datetime.iso8601':
        # left as the raw iso8601 string
        result = yielder.switch(STR)
    elif tag == 'base64':
        result = base64.b64decode(yielder.switch(STR))
    elif tag == 'struct':
        result = {}
        while True:
            # either another <member> or the closing </struct>
            state, _ = yielder.switch(START.member, END.struct)
            if state == END:
                break
            yielder.switch(START.name)
            key = yielder.switch(STR)
            yielder.switch(END.name)
            yielder.switch(START.value)
            result[key] = handle(yielder)
            yielder.switch(END.value)
            yielder.switch(END.member)
        ## We already handled </struct> above, don't want to handle it below
        return result
    elif tag == 'array':
        result = []
        yielder.switch(START.data)
        while True:
            # either another <value> or the closing </data>
            state, _ = yielder.switch(START.value, END.data)
            if state == END:
                break
            result.append(handle(yielder))
            yielder.switch(END.value)
    # consume the closing tag of the value we just read
    yielder.switch(getattr(END, tag))
    return result
# mu markup element factories for every XML-RPC element we emit
VALUE = mu.tag_factory('value')
BOOLEAN = mu.tag_factory('boolean')
INT = mu.tag_factory('int')
STRUCT = mu.tag_factory('struct')
MEMBER = mu.tag_factory('member')
NAME = mu.tag_factory('name')
ARRAY = mu.tag_factory('array')
DATA = mu.tag_factory('data')
STRING = mu.tag_factory('string')
DOUBLE = mu.tag_factory('double')
METHODRESPONSE = mu.tag_factory('methodResponse')
PARAMS = mu.tag_factory('params')
PARAM = mu.tag_factory('param')
# render these leaf elements without surrounding whitespace/newlines
mu.inline_elements['string'] = True
mu.inline_elements['boolean'] = True
mu.inline_elements['name'] = True
def _generate(something):
    """ Render a python value as an XML-RPC <value> element tree
    (mu markup).  Unsupported types yield None, as before. """
    if isinstance(something, dict):
        struct = STRUCT()
        for key, val in something.items():
            struct[
                MEMBER[
                    NAME[key], _generate(val)]]
        return VALUE[struct]
    if isinstance(something, list):
        data = DATA()
        for element in something:
            data[_generate(element)]
        return VALUE[ARRAY[[data]]]
    if isinstance(something, basestring):
        return VALUE[STRING[something]]
    if isinstance(something, bool):
        # must come before the int test: bools are ints in python
        return VALUE[BOOLEAN['1' if something else '0']]
    if isinstance(something, int):
        return VALUE[INT[something]]
    if isinstance(something, float):
        return VALUE[DOUBLE[something]]
def generate(*args):
    """ Wrap each argument as a <param> of a <methodResponse>. """
    params = PARAMS()
    for value in args:
        params[PARAM[_generate(value)]]
    return METHODRESPONSE[params]
if __name__ == '__main__':
    # smoke test: parse a sample methodCall and print the result
    print parse("""<?xml version="1.0"?> <methodCall> <methodName>examples.getStateName</methodName> <params> <param> <value><i4>41</i4></value> </param> </params> </methodCall>
""")

View File

@@ -1,64 +0,0 @@
"""\
@file fastest_elementtree.py
@brief Concealing some gnarly import logic in here. This should export the interface of elementtree.
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
# The parsing exception raised by the underlying library depends
# on the ElementTree implementation we're using, so we provide an
# alias here.
#
# Use ElementTreeError as the exception type for catching parsing
# errors.
# Using cElementTree might cause some unforeseen problems, so here's a
# convenient off switch.
use_celementree = True
try:
if not use_celementree:
raise ImportError()
# Python 2.3 and 2.4.
from cElementTree import *
ElementTreeError = SyntaxError
except ImportError:
try:
if not use_celementree:
raise ImportError()
# Python 2.5 and above.
from xml.etree.cElementTree import *
ElementTreeError = SyntaxError
except ImportError:
# Pure Python code.
try:
# Python 2.3 and 2.4.
from elementtree.ElementTree import *
except ImportError:
# Python 2.5 and above.
from xml.etree.ElementTree import *
# The pure Python ElementTree module uses Expat for parsing.
from xml.parsers.expat import ExpatError as ElementTreeError

View File

@@ -1,52 +0,0 @@
"""\
@file helpformatter.py
@author Phoenix
@brief Class for formatting optparse descriptions.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import optparse
import textwrap
class Formatter(optparse.IndentedHelpFormatter):
    """ optparse help formatter that wraps each line of a description
    independently, preserving intentional line breaks. """
    def __init__(
        self,
        p_indentIncrement = 2,
        p_maxHelpPosition = 24,
        p_width = 79,
        p_shortFirst = 1) :
        # initialize the base HelpFormatter machinery directly
        optparse.HelpFormatter.__init__(
            self,
            p_indentIncrement,
            p_maxHelpPosition,
            p_width,
            p_shortFirst)
    def format_description(self, p_description):
        """ Wrap each newline-separated chunk to the current width. """
        usable_width = self.width - self.current_indent
        hang = " " * (self.current_indent + 2)
        wrapped = [textwrap.fill(chunk, usable_width,
                                 initial_indent = hang,
                                 subsequent_indent = hang)
                   for chunk in p_description.split("\n")]
        return "\n".join(wrapped)

View File

@@ -1,63 +0,0 @@
"""\
@file iterators.py
@brief Useful general-purpose iterators.
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
from __future__ import nested_scopes
def iter_chunks(rows, aggregate_size=100):
    """
    Given an iterable set of items (@p rows), produces lists of up to @p
    aggregate_size items at a time, for example:
    iter_chunks([1,2,3,4,5,6,7,8,9,10], 3)
    Values for @p aggregate_size < 1 will raise ValueError (immediately,
    not lazily on first iteration).
    Will return a generator that produces, in the following order:
    - [1, 2, 3]
    - [4, 5, 6]
    - [7, 8, 9]
    - [10]
    """
    if aggregate_size < 1:
        raise ValueError()
    def iter_chunks_inner():
        """ the actual generator; outer function validates eagerly """
        row_iter = iter(rows)
        done = False
        agg = []
        while not done:
            try:
                # next() builtin (python 2.6+/3.x) instead of the
                # .next() method, which no longer exists in python 3
                row = next(row_iter)
                agg.append(row)
            except StopIteration:
                done = True
            if agg and (len(agg) >= aggregate_size or done):
                yield agg
                agg = []
    return iter_chunks_inner()

View File

@@ -1,72 +0,0 @@
"""\
@file iterators_test.py
@brief Test cases for iterators module.
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import unittest
from indra.util.iterators import iter_chunks
class TestIterChunks(unittest.TestCase):
    """Unittests for iter_chunks"""
    def test_bad_agg_size(self):
        # aggregate sizes below 1 must raise ValueError, both when
        # checked directly and when the result is iterated/listed
        rows = [1, 2, 3, 4]
        self.assertRaises(ValueError, iter_chunks, rows, 0)
        self.assertRaises(ValueError, iter_chunks, rows, -1)
        try:
            for _ in iter_chunks(rows, 0):
                pass
        except ValueError:
            pass
        else:
            self.fail()
        try:
            list(iter_chunks(rows, 0))
        except ValueError:
            pass
        else:
            self.fail()
    def test_empty(self):
        # empty input produces no chunks at all
        self.assertEqual(list(iter_chunks([])), [])
    def test_small(self):
        # fewer rows than the chunk size: one short chunk
        self.assertEqual(list(iter_chunks([[1]], 2)), [[[1]]])
    def test_size(self):
        # row count exactly equal to the chunk size: one full chunk
        self.assertEqual(list(iter_chunks([[1], [2]], 2)), [[[1], [2]]])
    def test_multi_agg(self):
        # odd row count: full chunks followed by a short trailing chunk
        rows = [[1], [2], [3], [4], [5]]
        self.assertEqual(list(iter_chunks(rows, 2)),
                         [[[1], [2]], [[3], [4]], [[5]]])
if __name__ == "__main__":
unittest.main()

View File

@@ -33,21 +33,26 @@ import filecmp
import fnmatch
import getopt
import glob
import itertools
import operator
import os
import re
import shutil
import subprocess
import sys
import tarfile
import errno
import subprocess
class ManifestError(RuntimeError):
"""Use an exception more specific than generic Python RuntimeError"""
pass
def __init__(self, msg):
self.msg = msg
super(ManifestError, self).__init__(self.msg)
class MissingError(ManifestError):
"""You specified a file that doesn't exist"""
pass
def __init__(self, msg):
self.msg = msg
super(MissingError, self).__init__(self.msg)
def path_ancestors(path):
drive, path = os.path.splitdrive(os.path.normpath(path))
@@ -88,7 +93,7 @@ DEFAULT_SRCTREE = os.path.dirname(sys.argv[0])
CHANNEL_VENDOR_BASE = 'Singularity'
RELEASE_CHANNEL = CHANNEL_VENDOR_BASE + ' Release'
ARGUMENTS=[
BASE_ARGUMENTS=[
dict(name='actions',
description="""This argument specifies the actions that are to be taken when the
script is run. The meaningful actions are currently:
@@ -106,9 +111,17 @@ ARGUMENTS=[
Example use: %(name)s --arch=i686
On Linux this would try to use Linux_i686Manifest.""",
default=""),
dict(name='artwork', description='Artwork directory.', default=DEFAULT_SRCTREE),
dict(name='branding_id', description="Identifier for the branding set to use.", default='singularity'),
dict(name='build', description='Build directory.', default=DEFAULT_SRCTREE),
dict(name='buildtype', description='Build type (i.e. Debug, Release, RelWithDebInfo).', default=None),
dict(name='channel',
description="""The channel to use for updates, packaging, settings name, etc.""",
default='CHANNEL UNSET'),
dict(name='channel_suffix',
description="""Addition to the channel for packaging and channel value,
but not application name (used internally)""",
default=None),
dict(name='configuration',
description="""The build configuration used.""",
default="Release"),
@@ -116,12 +129,6 @@ ARGUMENTS=[
dict(name='grid',
description="""Which grid the client will try to connect to.""",
default=None),
dict(name='channel',
description="""The channel to use for updates, packaging, settings name, etc.""",
default='CHANNEL UNSET'),
dict(name='channel_suffix',
description="""Addition to the channel for packaging and channel value, but not application name (used internally)""",
default=None),
dict(name='installer_name',
description=""" The name of the file that the installer should be
packaged up into. Only used on Linux at the moment.""",
@@ -133,13 +140,17 @@ ARGUMENTS=[
description="""The current platform, to be used for looking up which
manifest class to run.""",
default=get_default_platform),
dict(name='signature',
description="""This specifies an identity to sign the viewer with, if any.
If no value is supplied, the default signature will be used, if any. Currently
only used on Mac OS X.""",
default=None),
dict(name='source',
description='Source directory.',
default=DEFAULT_SRCTREE),
dict(name='standalone',
description='Set to ON if this is a standalone build.',
default="OFF"),
dict(name='artwork', description='Artwork directory.', default=DEFAULT_SRCTREE),
dict(name='touch',
description="""File to touch when action is finished. Touch file will
contain the name of the final package in a form suitable
@@ -147,20 +158,15 @@ ARGUMENTS=[
default=None),
dict(name='versionfile',
description="""The name of a file containing the full version number."""),
dict(name='signature',
description="""This specifies an identity to sign the viewer with, if any.
If no value is supplied, the default signature will be used, if any. Currently
only used on Mac OS X.""",
default=None)
]
def usage(srctree=""):
def usage(arguments, srctree=""):
nd = {'name':sys.argv[0]}
print """Usage:
%(name)s [options] [destdir]
Options:
""" % nd
for arg in ARGUMENTS:
for arg in arguments:
default = arg['default']
if hasattr(default, '__call__'):
default = "(computed value) \"" + str(default(srctree)) + '"'
@@ -171,11 +177,15 @@ def usage(srctree=""):
default,
arg['description'] % nd)
def main():
## import itertools
def main(extra=[]):
## print ' '.join((("'%s'" % item) if ' ' in item else item)
## for item in itertools.chain([sys.executable], sys.argv))
option_names = [arg['name'] + '=' for arg in ARGUMENTS]
# Supplement our default command-line switches with any desired by
# application-specific caller.
arguments = list(itertools.chain(BASE_ARGUMENTS, extra))
# Alphabetize them by option name in case we display usage.
arguments.sort(key=operator.itemgetter('name'))
option_names = [arg['name'] + '=' for arg in arguments]
option_names.append('help')
options, remainder = getopt.getopt(sys.argv[1:], "", option_names)
@@ -198,11 +208,11 @@ def main():
# early out for help
if 'help' in args:
# *TODO: it is a huge hack to pass around the srctree like this
usage(args['source'])
usage(arguments, srctree=args['source'])
return
# defaults
for arg in ARGUMENTS:
for arg in arguments:
if arg['name'] not in args:
default = arg['default']
if hasattr(default, '__call__'):
@@ -231,101 +241,68 @@ def main():
print "Option:", opt, "=", args[opt]
# pass in sourceid as an argument now instead of an environment variable
try:
args['sourceid'] = os.environ["sourceid"]
except KeyError:
args['sourceid'] = ""
args['sourceid'] = os.environ.get("sourceid", "")
# Build base package.
touch = args.get('touch')
if touch:
print 'Creating base package'
args['package_id'] = "" # base package has no package ID
print '================ Creating base package'
else:
print '================ Starting base copy'
wm = LLManifest.for_platform(args['platform'], args.get('arch'))(args)
wm.do(*args['actions'])
# Store package file for later if making touched file.
base_package_file = ""
if touch:
print 'Created base package ', wm.package_file
print '================ Created base package ', wm.package_file
base_package_file = "" + wm.package_file
else:
print '================ Finished base copy'
# handle multiple packages if set
try:
additional_packages = os.environ["additional_packages"]
except KeyError:
additional_packages = ""
# ''.split() produces empty list
additional_packages = os.environ.get("additional_packages", "").split()
if additional_packages:
# Determine destination prefix / suffix for additional packages.
base_dest_postfix = args['dest']
base_dest_prefix = ""
base_dest_parts = args['dest'].split(os.sep)
if len(base_dest_parts) > 1:
base_dest_postfix = base_dest_parts[len(base_dest_parts) - 1]
base_dest_prefix = base_dest_parts[0]
i = 1
while i < len(base_dest_parts) - 1:
base_dest_prefix = base_dest_prefix + os.sep + base_dest_parts[i]
i = i + 1
base_dest_parts = list(os.path.split(args['dest']))
base_dest_parts.insert(-1, "{}")
base_dest_template = os.path.join(*base_dest_parts)
# Determine touched prefix / suffix for additional packages.
base_touch_postfix = ""
base_touch_prefix = ""
if touch:
base_touch_postfix = touch
base_touch_parts = touch.split('/')
base_touch_parts = list(os.path.split(touch))
# Because of the special insert() logic below, we don't just want
# [dirpath, basename]; we want [dirpath, directory, basename].
# Further split the dirpath and replace it in the list.
base_touch_parts[0:1] = os.path.split(base_touch_parts[0])
if "arwin" in args['platform']:
if len(base_touch_parts) > 1:
base_touch_postfix = base_touch_parts[len(base_touch_parts) - 1]
base_touch_prefix = base_touch_parts[0]
i = 1
while i < len(base_touch_parts) - 1:
base_touch_prefix = base_touch_prefix + '/' + base_touch_parts[i]
i = i + 1
base_touch_parts.insert(-1, "{}")
else:
if len(base_touch_parts) > 2:
base_touch_postfix = base_touch_parts[len(base_touch_parts) - 2] + '/' + base_touch_parts[len(base_touch_parts) - 1]
base_touch_prefix = base_touch_parts[0]
i = 1
while i < len(base_touch_parts) - 2:
base_touch_prefix = base_touch_prefix + '/' + base_touch_parts[i]
i = i + 1
# Store base channel name.
base_channel_name = args['channel']
# Build each additional package.
package_id_list = additional_packages.split(" ")
args['channel'] = base_channel_name
for package_id in package_id_list:
base_touch_parts.insert(-2, "{}")
base_touch_template = os.path.join(*base_touch_parts)
for package_id in additional_packages:
args['channel_suffix'] = os.environ.get(package_id + "_viewer_channel_suffix")
args['sourceid'] = os.environ.get(package_id + "_sourceid")
args['dest'] = base_dest_template.format(package_id)
if touch:
print '================ Creating additional package for "', package_id, '" in ', args['dest']
else:
print '================ Starting additional copy for "', package_id, '" in ', args['dest']
try:
if package_id + "_viewer_channel_suffix" in os.environ:
args['channel_suffix'] = os.environ[package_id + "_viewer_channel_suffix"]
else:
args['channel_suffix'] = None
if package_id + "_sourceid" in os.environ:
args['sourceid'] = os.environ[package_id + "_sourceid"]
else:
args['sourceid'] = None
args['dest'] = base_dest_prefix + os.sep + package_id + os.sep + base_dest_postfix
except KeyError:
sys.stderr.write("Failed to create package for package_id: %s" % package_id)
sys.stderr.flush()
continue
wm = LLManifest.for_platform(args['platform'], args.get('arch'))(args)
wm.do(*args['actions'])
except Exception as err:
sys.exit(str(err))
if touch:
print 'Creating additional package for "', package_id, '" in ', args['dest']
wm = LLManifest.for_platform(args['platform'], args.get('arch'))(args)
wm.do(*args['actions'])
if touch:
print 'Created additional package ', wm.package_file, ' for ', package_id
faketouch = base_touch_prefix + '/' + package_id + '/' + base_touch_postfix
fp = open(faketouch, 'w')
fp.write('set package_file=%s\n' % wm.package_file)
fp.close()
print '================ Created additional package ', wm.package_file, ' for ', package_id
with open(base_touch_template.format(package_id), 'w') as fp:
fp.write('set package_file=%s\n' % wm.package_file)
else:
print '================ Finished additional copy "', package_id, '" in ', args['dest']
# Write out the package file in this format, so that it can easily be called
# and used in a .bat file - yeah, it sucks, but this is the simplest...
touch = args.get('touch')
if touch:
fp = open(touch, 'w')
fp.write('set package_file=%s\n' % base_package_file)
fp.close()
with open(touch, 'w') as fp:
fp.write('set package_file=%s\n' % base_package_file)
print 'touched', touch
return 0
@@ -371,22 +348,113 @@ class LLManifest(object):
in the file list by path()."""
self.excludes.append(glob)
def prefix(self, src='', build=None, dst=None):
""" Pushes a prefix onto the stack. Until end_prefix is
called, all relevant method calls (esp. to path()) will prefix
paths with the entire prefix stack. Source and destination
prefixes can be different, though if only one is provided they
are both equal. To specify a no-op, use an empty string, not
None."""
if dst is None:
dst = src
if build is None:
build = src
def prefix(self, src='', build='', dst='', src_dst=None):
"""
Usage:
with self.prefix(...args as described...):
self.path(...)
For the duration of the 'with' block, pushes a prefix onto the stack.
Within that block, all relevant method calls (esp. to path()) will
prefix paths with the entire prefix stack. Source and destination
prefixes are independent; if omitted (or passed as the empty string),
the prefix has no effect. Thus:
with self.prefix(src='foo'):
# no effect on dst
with self.prefix(dst='bar'):
# no effect on src
If you want to set both at once, use src_dst:
with self.prefix(src_dst='subdir'):
# same as self.prefix(src='subdir', dst='subdir')
# Passing src_dst makes any src or dst argument in the same
# parameter list irrelevant.
Also supports the older (pre-Python-2.5) syntax:
if self.prefix(...args as described...):
self.path(...)
self.end_prefix(...)
Before the arrival of the 'with' statement, one was required to code
self.prefix() and self.end_prefix() in matching pairs to push and to
pop the prefix stacks, respectively. The older prefix() method
returned True specifically so that the caller could indent the
relevant block of code with 'if', just for aesthetic purposes.
"""
if src_dst is not None:
src = src_dst
dst = src_dst
self.src_prefix.append(src)
self.artwork_prefix.append(src)
self.build_prefix.append(build)
self.dst_prefix.append(dst)
return True # so that you can wrap it in an if to get indentation
## self.display_stacks()
# The above code is unchanged from the original implementation. What's
# new is the return value. We're going to return an instance of
# PrefixManager that binds this LLManifest instance and Does The Right
# Thing on exit.
return self.PrefixManager(self)
def display_stacks(self):
width = 1 + max(len(stack) for stack in self.PrefixManager.stacks)
for stack in self.PrefixManager.stacks:
print "{} {}".format((stack + ':').ljust(width),
os.path.join(*getattr(self, stack)))
class PrefixManager(object):
# stack attributes we manage in this LLManifest (sub)class
# instance
stacks = ("src_prefix", "artwork_prefix", "build_prefix", "dst_prefix")
def __init__(self, manifest):
self.manifest = manifest
# If the caller wrote:
# with self.prefix(...):
# as intended, then bind the state of each prefix stack as it was
# just BEFORE the call to prefix(). Since prefix() appended an
# entry to each prefix stack, capture len()-1.
self.prevlen = { stack: len(getattr(self.manifest, stack)) - 1
for stack in self.stacks }
def __nonzero__(self):
# If the caller wrote:
# if self.prefix(...):
# then a value of this class had better evaluate as 'True'.
return True
def __enter__(self):
# nobody uses 'with self.prefix(...) as variable:'
return None
def __exit__(self, type, value, traceback):
# First, if the 'with' block raised an exception, just propagate.
# Do NOT swallow it.
if type is not None:
return False
# Okay, 'with' block completed successfully. Restore previous
# state of each of the prefix stacks in self.stacks.
# Note that we do NOT simply call pop() on them as end_prefix()
# does. This is to cope with the possibility that the coder
# changed 'if self.prefix(...):' to 'with self.prefix(...):' yet
# forgot to remove the self.end_prefix(...) call at the bottom of
# the block. In that case, calling pop() again would be Bad! But
# if we restore the length of each stack to what it was before the
# current prefix() block, it doesn't matter whether end_prefix()
# was called or not.
for stack, prevlen in self.prevlen.items():
# find the attribute in 'self.manifest' named by 'stack', and
# truncate that list back to 'prevlen'
del getattr(self.manifest, stack)[prevlen:]
## self.manifest.display_stacks()
def end_prefix(self, descr=None):
"""Pops a prefix off the stack. If given an argument, checks
@@ -433,6 +501,19 @@ class LLManifest(object):
relative to the destination directory."""
return os.path.join(self.get_dst_prefix(), relpath)
def _relative_dst_path(self, dstpath):
"""
Returns the path to a file or directory relative to the destination directory.
This should only be used for generating diagnostic output in the path method.
"""
dest_root=self.dst_prefix[0]
if dstpath.startswith(dest_root+os.path.sep):
return dstpath[len(dest_root)+1:]
elif dstpath.startswith(dest_root):
return dstpath[len(dest_root):]
else:
return dstpath
def ensure_src_dir(self, reldir):
"""Construct the path for a directory relative to the
source path, and ensures that it exists. Returns the
@@ -450,29 +531,17 @@ class LLManifest(object):
return path
def run_command(self, command):
""" Runs an external command, and returns the output. Raises
an exception if the command returns a nonzero status code. For
debugging/informational purposes, prints out the command's
output as it is received."""
"""
Runs an external command.
Raises ManifestError exception if the command returns a nonzero status.
"""
print "Running command:", command
sys.stdout.flush()
child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True)
lines = []
while True:
lines.append(child.stdout.readline())
if lines[-1] == '':
break
else:
print lines[-1],
output = ''.join(lines)
child.stdout.close()
status = child.wait()
if status:
raise ManifestError(
"Command %s returned non-zero status (%s) \noutput:\n%s"
% (command, status, output) )
return output
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as err:
raise ManifestError( "Command %s returned non-zero status (%s)"
% (command, err.returncode) )
def created_path(self, path):
""" Declare that you've created a path in order to
@@ -485,6 +554,7 @@ class LLManifest(object):
def put_in_file(self, contents, dst, src=None):
# write contents as dst
dst_path = self.dst_path_of(dst)
self.cmakedirs(os.path.dirname(dst_path))
f = open(dst_path, "wb")
try:
f.write(contents)
@@ -546,9 +616,16 @@ class LLManifest(object):
# *TODO is this gonna be useful?
print "Cleaning up " + c
def process_either(self, src, dst):
# If it's a real directory, recurse through it --
# but not a symlink! Handle those like files.
if os.path.isdir(src) and not os.path.islink(src):
return self.process_directory(src, dst)
else:
return self.process_file(src, dst)
def process_file(self, src, dst):
if self.includes(src, dst):
# print src, "=>", dst
for action in self.actions:
methodname = action + "_action"
method = getattr(self, methodname, None)
@@ -573,10 +650,7 @@ class LLManifest(object):
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
count += self.process_directory(srcname, dstname)
else:
count += self.process_file(srcname, dstname)
count += self.process_either(srcname, dstname)
return count
def includes(self, src, dst):
@@ -616,16 +690,21 @@ class LLManifest(object):
# Don't recopy file if it's up-to-date.
# If we seem to be not not overwriting files that have been
# updated, set the last arg to False, but it will take longer.
## reldst = (dst[len(self.dst_prefix[0]):]
## if dst.startswith(self.dst_prefix[0])
## else dst).lstrip(r'\/')
if os.path.exists(dst) and filecmp.cmp(src, dst, True):
## print "{} (skipping, {} exists)".format(src, reldst)
return
# only copy if it's not excluded
if self.includes(src, dst):
try:
os.unlink(dst)
except OSError, err:
except OSError as err:
if err.errno != errno.ENOENT:
raise
## print "{} => {}".format(src, reldst)
shutil.copy2(src, dst)
def ccopytree(self, src, dst):
@@ -643,7 +722,7 @@ class LLManifest(object):
dstname = os.path.join(dst, name)
try:
self.ccopymumble(srcname, dstname)
except (IOError, os.error), why:
except (IOError, os.error) as why:
errors.append((srcname, dstname, why))
if errors:
raise ManifestError, errors
@@ -724,13 +803,13 @@ class LLManifest(object):
return self.path(os.path.join(path, file), file)
def path(self, src, dst=None):
sys.stdout.write("Processing %s => %s ... " % (src, dst))
sys.stdout.flush()
if src == None:
raise ManifestError("No source file, dst is " + dst)
if dst == None:
dst = src
dst = os.path.join(self.get_dst_prefix(), dst)
sys.stdout.write("Processing %s => %s ... " % (src, self._relative_dst_path(dst)))
def try_path(src):
# expand globs
@@ -743,29 +822,21 @@ class LLManifest(object):
# if we're specifying a single path (not a glob),
# we should error out if it doesn't exist
self.check_file_exists(src)
# if it's a directory, recurse through it
if os.path.isdir(src):
count += self.process_directory(src, dst)
else:
count += self.process_file(src, dst)
count += self.process_either(src, dst)
return count
for pfx in self.get_src_prefix(), self.get_artwork_prefix(), self.get_build_prefix():
try_prefixes = [self.get_src_prefix(), self.get_artwork_prefix(), self.get_build_prefix()]
tried=[]
count=0
while not count and try_prefixes:
pfx = try_prefixes.pop(0)
try:
count = try_path(os.path.join(pfx, src))
except MissingError:
# If src isn't a wildcard, and if that file doesn't exist in
# this pfx, try next pfx.
count = 0
continue
# Here try_path() didn't raise MissingError. Did it process any files?
if count:
break
# Even though try_path() didn't raise MissingError, it returned 0
# files. src is probably a wildcard meant for some other pfx. Loop
# back to try the next.
tried.append(pfx)
if not try_prefixes:
# no more prefixes left to try
print "unable to find '%s'; looked in:\n %s" % (src, '\n '.join(tried))
print "%d files" % count
# Let caller check whether we processed as many files as expected. In

View File

@@ -1,182 +0,0 @@
#!/usr/bin/env python
"""\
@file llperformance.py
$LicenseInfo:firstyear=2010&license=viewerlgpl$
Second Life Viewer Source Code
Copyright (C) 2010-2011, Linden Research, Inc.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License only.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
$/LicenseInfo$
"""
# ------------------------------------------------
# Sim metrics utility functions.
import glob, os, time, sys, stat, exceptions
from indra.base import llsd
gBlockMap = {} #Map of performance metric data with function hierarchy information.
gCurrentStatPath = ""
gIsLoggingEnabled=False
class LLPerfStat:
    """Accumulates timing samples (in microseconds) for one named metric.

    Each start()/stop() pair counts as one run; get_map() renders the
    collected totals as a plain dict suitable for LLSD serialization.
    """
    def __init__(self, key):
        self.mName = key
        self.mTotalTime = 0     # total elapsed microseconds over all runs
        self.mNumRuns = 0       # number of completed start()/stop() cycles
        # Creation time, both as integer milliseconds and ISO-8601 UTC text.
        self.mTimeStamp = int(time.time() * 1000)
        self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

    def __str__(self):
        return "%f" % self.mTotalTime

    def start(self):
        """Begin one timing run; must be paired with stop()."""
        self.mNumRuns += 1
        self.mStartTime = int(time.time() * 1000000)

    def stop(self):
        """End the current run and fold its elapsed time into the total."""
        finish_time = int(time.time() * 1000000)
        self.mTotalTime += finish_time - self.mStartTime

    def get_map(self):
        """Return this stat's data as a dict of primitive values."""
        return {
            'name': self.mName,
            'utc_time': self.mUTCTime,
            'timestamp': self.mTimeStamp,
            'us': self.mTotalTime,
            'count': self.mNumRuns,
        }
class PerfError(Exception):
    """Raised when an LLPerfBlock is destroyed without being finished.

    Note: the base class was ``exceptions.Exception``; in Python 2 that is
    the very same object as the builtin ``Exception``, so subclassing the
    builtin directly is behavior-identical for callers that catch it.
    """
    def __init__(self):
        Exception.__init__(self)

    def __str__(self):
        # Bug fix: this was misspelled ``__Str__`` (so str() never invoked
        # it) and it *printed* its message instead of returning it.
        return "Unfinished LLPerfBlock"
class LLPerfBlock:
    """Scoped timer for one named block of work.

    Constructing an LLPerfBlock pushes ``key`` onto the global stat path
    and starts the corresponding LLPerfStat; finish() stops the stat and
    restores the previous path.  Both are no-ops when metrics logging is
    disabled (gIsLoggingEnabled false).
    """
    def __init__( self, key ):
        global gBlockMap
        global gCurrentStatPath
        global gIsLoggingEnabled

        # Check to see if we're running metrics right now.
        if gIsLoggingEnabled:
            self.mRunning = True        # Mark myself as running.

            # Extend the hierarchical stat path and look up (or create)
            # the LLPerfStat for this exact path.
            self.mPreviousStatPath = gCurrentStatPath
            gCurrentStatPath += "/" + key
            if gCurrentStatPath not in gBlockMap:
                gBlockMap[gCurrentStatPath] = LLPerfStat(key)

            self.mStat = gBlockMap[gCurrentStatPath]
            self.mStat.start()

    def finish( self ):
        # Bug fix: ``global gCurrentStatPath`` was missing here, so the
        # assignment below created a *local* variable and the global stat
        # path was never popped back to its previous value.
        global gCurrentStatPath
        global gIsLoggingEnabled

        if gIsLoggingEnabled:
            self.mStat.stop()
            self.mRunning = False
            gCurrentStatPath = self.mPreviousStatPath

#    def __del__( self ):
#        if self.mRunning:
#            #SPATTERS FIXME
#            raise PerfError
class LLPerformance:
    """Controller for sim performance metrics collection.

    Decides at construction whether statistics logging should be enabled
    (driven by an on-host LLSD config file) and, in done(), writes the
    collected per-block stats out as LLSD notation.
    """
    #--------------------------------------------------
    # Determine whether or not we want to log statistics

    def __init__( self, process_name = "python" ):
        # process_name is used only to build the output filename in done().
        self.process_name = process_name
        self.init_testing()
        self.mTimeStamp = int(time.time()*1000)
        self.mUTCTime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

    def init_testing( self ):
        """Enable the global logging flag if the host config file exists
        and its mtime is still within the configured 'duration' window."""
        global gIsLoggingEnabled

        host_performance_file = "/dev/shm/simperf/simperf_proc_config.llsd"

        #If file exists, open
        if os.path.exists(host_performance_file):
            file = open (host_performance_file,'r')

            #Read serialized LLSD from file.
            body = llsd.parse(file.read())

            #Calculate time since file last modified.
            stats = os.stat(host_performance_file)
            now = time.time()
            mod = stats[stat.ST_MTIME]
            age = now - mod

            # Config is considered live only while younger than 'duration'
            # seconds; a stale file leaves logging disabled.
            if age < ( body['duration'] ):
                gIsLoggingEnabled = True


    def get ( self ):
        """Return True if metrics logging is currently enabled."""
        global gIsLoggingEnabled
        return gIsLoggingEnabled

    #def output(self,ptr,path):
    #    if 'stats' in ptr:
    #        stats = ptr['stats']
    #        self.mOutputPtr[path] = stats.get_map()

    #    if 'children' in ptr:
    #        children=ptr['children']

    #        curptr = self.mOutputPtr
    #        curchildren={}
    #        curptr['children'] = curchildren

    #        for key in children:
    #            curchildren[key]={}
    #            self.mOutputPtr = curchildren[key]
    #            self.output(children[key],path + '/' + key)


    def done(self):
        """Write process info plus all collected stats to
        /dev/shm/simperf/<process_name>_proc.<pid>.llsd as LLSD notation.

        No-op when logging is disabled.  NOTE: destructively replaces each
        LLPerfStat in gBlockMap with its plain-dict form, so done() is
        effectively one-shot.
        """
        global gBlockMap

        if not self.get():
            return

        output_name = "/dev/shm/simperf/%s_proc.%d.llsd" % (self.process_name, os.getpid())
        output_file = open(output_name, 'w')
        process_info = {
            "name" : self.process_name,
            "pid" : os.getpid(),
            "ppid" : os.getppid(),
            "timestamp" : self.mTimeStamp,
            "utc_time" : self.mUTCTime,
            }
        output_file.write(llsd.format_notation(process_info))
        output_file.write('\n')

        for key in gBlockMap.keys():
            gBlockMap[key] = gBlockMap[key].get_map()
        output_file.write(llsd.format_notation(gBlockMap))
        output_file.write('\n')
        output_file.close()

View File

@@ -1,117 +0,0 @@
"""\
@file llsubprocess.py
@author Phoenix
@date 2008-01-18
@brief The simplest possible wrapper for a common sub-process paradigm.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import os
import popen2
import time
import select
class Timeout(RuntimeError):
    """Exception raised when a subprocess times out."""
def run(command, args=None, data=None, timeout=None):
    """\
@brief Run command with arguments, feeding it stdin and capturing output.

This is it. This is the function I want to run all the time when doing
subprocess work: none of the standard convenience calls both accept
input and return all the output plus the exit status.
@param command A string specifying a process to launch.
@param args Arguments to be passed to command. Must be list, tuple or None.
@param data Input to feed to the command's stdin, or None.
@param timeout Maximum number of seconds to run, or None for no limit.
@return Returns (result, stdout, stderr) from the process.
@raise Timeout if the process runs longer than 'timeout' seconds.
"""
    cmd = [command]
    if args:
        cmd.extend([str(arg) for arg in args])
    #print "cmd: ","' '".join(cmd)
    # Popen3 with capturestderr=True gives us fromchild/childerr/tochild pipes.
    child = popen2.Popen3(cmd, True)
    #print child.pid
    out = []
    err = []
    result = -1
    time_left = timeout
    # Watch stdin for writability only while we still have data to send.
    tochild = [child.tochild.fileno()]
    while True:
        time_start = time.time()
        #print "time:",time_left
        p_in, p_out, p_err = select.select(
            [child.fromchild.fileno(), child.childerr.fileno()],
            tochild,
            [],
            time_left)
        if p_in:
            # NOTE(review): both pipes are read whenever *either* is
            # readable; os.read on the not-ready pipe could block — TODO
            # confirm this is acceptable for the intended callers.
            new_line = os.read(child.fromchild.fileno(), 32 * 1024)
            if new_line:
                #print "line:",new_line
                out.append(new_line)
            new_line = os.read(child.childerr.fileno(), 32 * 1024)
            if new_line:
                #print "error:", new_line
                err.append(new_line)
        if p_out:
            if data:
                #print "p_out"
                # Write as much as the pipe accepts; keep the remainder
                # for the next iteration.  Close stdin once exhausted so
                # the child sees EOF.
                bytes = os.write(child.tochild.fileno(), data)
                data = data[bytes:]
                if len(data) == 0:
                    data = None
                    tochild = []
                    child.tochild.close()
        result = child.poll()
        if result != -1:
            # At this point, the child process has exited and result
            # is the return value from the process. Between the time
            # we called select() and poll() the process may have
            # exited so read all the data left on the child process
            # stdout and stderr.
            last = child.fromchild.read()
            if last:
                out.append(last)
            last = child.childerr.read()
            if last:
                err.append(last)
            child.tochild.close()
            child.fromchild.close()
            child.childerr.close()
            break
        if time_left is not None:
            # Charge this iteration's wall-clock time against the budget.
            time_left -= (time.time() - time_start)
            if time_left < 0:
                raise Timeout
    #print "result:",result
    out = ''.join(out)
    #print "stdout:", out
    err = ''.join(err)
    #print "stderr:", err
    return result, out, err

View File

@@ -1,592 +0,0 @@
"""\
@file named_query.py
@author Ryan Williams, Phoenix
@date 2007-07-31
@brief An API for running named queries.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import errno
import MySQLdb
import MySQLdb.cursors
import os
import os.path
import re
import time
from indra.base import llsd
from indra.base import config
DEBUG = False
NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq')
NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX)
_g_named_manager = None
def _init_g_named_manager(sql_dir = None):
    """Point the global NamedQueryManager at a named-query hierarchy.

    This function is intended entirely for testing purposes, because
    it's tricky to control the config from inside a test.
    """
    global NQ_FILE_SUFFIX, NQ_FILE_SUFFIX_LEN, _g_named_manager

    # Re-read the suffix so tests can override it via config.
    NQ_FILE_SUFFIX = config.get('named-query-file-suffix', '.nq')
    NQ_FILE_SUFFIX_LEN = len(NQ_FILE_SUFFIX)

    if sql_dir is None:
        sql_dir = config.get('named-query-base-dir')
    if sql_dir is None:
        # Extra fallback directory in case config doesn't return what
        # we want: resolve relative to this module's location.
        here = os.path.realpath(os.path.dirname(__file__))
        sql_dir = os.path.abspath(os.path.join(
            here, "..", "..", "..", "..", "web", "dataservice", "sql"))

    _g_named_manager = NamedQueryManager(
        os.path.abspath(os.path.realpath(sql_dir)))
def get(name, schema = None):
    """Return the named query object to be used to perform queries."""
    # Lazily construct the module-global manager on first use.
    if _g_named_manager is None:
        _init_g_named_manager()
    query = _g_named_manager.get(name)
    return query.for_schema(schema)
def sql(connection, name, params):
    """Return the rendered SQL text for named query 'name' with 'params'
    substituted, using the given connection for escaping."""
    # use module-global NamedQuery object to perform default substitution
    return get(name).sql(connection, params)
def run(connection, name, params, expect_rows = None):
    """\
@brief given a connection, run a named query with the params

Note that this function will fetch ALL rows.
@param connection The connection to use
@param name The name of the query to run
@param params The parameters passed into the query
@param expect_rows The number of rows expected. Set to 1 if return_as_map is true. Raises ExpectationFailed if the number of returned rows doesn't exactly match. Kind of a hack.
@return Returns the result set as a list of dicts.
"""
    # Delegates to the module-global manager via get().
    return get(name).run(connection, params, expect_rows)
class ExpectationFailed(Exception):
    """Raised when an expectation for an sql query is not met."""

    def __init__(self, message):
        super(ExpectationFailed, self).__init__(message)
        # Keep the message directly accessible for callers that read it.
        self.message = message
class NamedQuery(object):
    def __init__(self, name, filename):
        """ Construct a NamedQuery object.  The name argument is an
        arbitrary name as a handle for the query, and the filename is
        a path to a file or a file-like object containing an llsd named
        query document."""
        self._stat_interval_seconds = 5  # 5 seconds between mtime re-checks
        self._name = name
        # Append the configured suffix to plain string filenames that don't
        # already end with it; file-like objects pass through untouched.
        if (filename is not None and isinstance(filename, (str, unicode))
            and NQ_FILE_SUFFIX != filename[-NQ_FILE_SUFFIX_LEN:]):
            filename = filename + NQ_FILE_SUFFIX
        self._location = filename
        self._alternative = dict()
        self._last_mod_time = 0
        self._last_check_time = 0
        self.deleted = False
        self.load_contents()
    def name(self):
        """ The name of the query. """
        # Read-only accessor; _name is assigned once in __init__.
        return self._name
    def get_modtime(self):
        """ Returns the mtime (last modified time) of the named query
        filename. For file-like objects, expect a modtime of 0"""
        # Only real filesystem paths have a meaningful mtime; file-like
        # locations report 0 so reload logic treats them as never changing.
        if self._location and isinstance(self._location, (str, unicode)):
            return os.path.getmtime(self._location)
        return 0
    def load_contents(self):
        """ Loads and parses the named query file into self.  Does
        nothing if self.location is nonexistant."""
        if self._location:
            if isinstance(self._location, (str, unicode)):
                contents = llsd.parse(open(self._location).read())
            else:
                # we probably have a file-like object. Godspeed!
                contents = llsd.parse(self._location.read())
            self._reference_contents(contents)

            # Check for alternative implementations: each entry becomes a
            # nested NamedQuery with no file location of its own.
            try:
                for name, alt in self._contents['alternative'].items():
                    nq = NamedQuery(name, None)
                    nq._reference_contents(alt)
                    self._alternative[name] = nq
            except KeyError, e:
                # No 'alternative' key; that's fine.
                pass

        # Record when we loaded so staleness checks know what we have.
        self._last_mod_time = self.get_modtime()
        self._last_check_time = time.time()
    def _reference_contents(self, contents):
        "Helper method which builds internal structure from parsed contents"
        self._contents = contents
        self._ttl = int(self._contents.get('ttl', 0))
        self._return_as_map = bool(self._contents.get('return_as_map', False))
        self._legacy_dbname = self._contents.get('legacy_dbname', None)

        # reset these before doing the sql conversion because we will
        # read them there. reset these while loading so we pick up
        # changes.
        self._around = set()
        self._append = set()
        self._integer = set()

        # dynamic_where values may be a single string, a list of clause
        # strings, or a nested dict of clauses — convert SQL in each case.
        self._options = self._contents.get('dynamic_where', {})
        for key in self._options:
            if isinstance(self._options[key], basestring):
                self._options[key] = self._convert_sql(self._options[key])
            elif isinstance(self._options[key], list):
                lines = []
                for line in self._options[key]:
                    lines.append(self._convert_sql(line))
                self._options[key] = lines
            else:
                moreopt = {}
                for kk in self._options[key]:
                    moreopt[kk] = self._convert_sql(self._options[key][kk])
                self._options[key] = moreopt
        self._base_query = self._convert_sql(self._contents['base_query'])
        self._query_suffix = self._convert_sql(
            self._contents.get('query_suffix', ''))
    def _convert_sql(self, sql):
        """convert the parsed sql into a useful internal structure.

        This function has to turn the named query format into a pyformat
        style. It also has to look for %:name% and :name% and
        ready them for use in LIKE statements"""
        if sql:
            # This first sub is to properly escape any % signs that
            # are meant to be literally passed through to mysql in the
            # query. It leaves any %'s that are used for
            # like-expressions.
            expr = re.compile("(?<=[^a-zA-Z0-9_-])%(?=[^:])")
            sql = expr.sub('%%', sql)

            # This should tackle the rest of the %'s in the query, by
            # converting them to LIKE clauses.
            expr = re.compile("(%?):([a-zA-Z][a-zA-Z0-9_-]*)%")
            sql = expr.sub(self._prepare_like, sql)

            # #:name becomes an integer-coerced substitution key.
            expr = re.compile("#:([a-zA-Z][a-zA-Z0-9_-]*)")
            sql = expr.sub(self._prepare_integer, sql)

            # Plain :name becomes standard pyformat %(name)s.
            expr = re.compile(":([a-zA-Z][a-zA-Z0-9_-]*)")
            sql = expr.sub("%(\\1)s", sql)
        return sql
    def _prepare_like(self, match):
        """This function changes LIKE statement replace behavior

        It works by turning %:name% to %(_name_around)s and :name% to
        %(_name_append)s.  Since a leading '_' is not a valid keyname
        input (enforced via unit tests), it will never clash with
        existing keys.  Then, when building the statement, the query
        runner will generate corrected strings."""
        if match.group(1) == '%':
            # there is a leading % so this is treated as prefix/suffix
            self._around.add(match.group(2))
            return "%(" + self._build_around_key(match.group(2)) + ")s"
        else:
            # there is no leading %, so this is suffix only
            self._append.add(match.group(2))
            return "%(" + self._build_append_key(match.group(2)) + ")s"
def _build_around_key(self, key):
return "_" + key + "_around"
def _build_append_key(self, key):
return "_" + key + "_append"
    def _prepare_integer(self, match):
        """This function adjusts the sql for #:name replacements

        It works by turning #:name to %(_name_as_integer)s.  Since a
        leading '_' is not a valid keyname input (enforced via unit
        tests), it will never clash with existing keys.  Then, when
        building the statement, the query runner will generate
        corrected strings."""
        # Remember which keys need integer coercion at run time.
        self._integer.add(match.group(1))
        return "%(" + self._build_integer_key(match.group(1)) + ")s"
def _build_integer_key(self, key):
return "_" + key + "_as_integer"
def _strip_wildcards_to_list(self, value):
"""Take string, and strip out the LIKE special characters.
Technically, this is database dependant, but postgresql and
mysql use the same wildcards, and I am not aware of a general
way to handle this. I think you need a sql statement of the
form:
LIKE_STRING( [ANY,ONE,str]... )
which would treat ANY as their any string, and ONE as their
single glyph, and str as something that needs database
specific encoding to not allow any % or _ to affect the query.
As it stands, I believe it's impossible to write a named query
style interface which uses like to search the entire space of
text available. Imagine the query:
% of brain used by average linden
In order to search for %, it must be escaped, so once you have
escaped the string to not do wildcard searches, and be escaped
for the database, and then prepended the wildcard you come
back with one of:
1) %\% of brain used by average linden
2) %%% of brain used by average linden
Then, when passed to the database to be escaped to be database
safe, you get back:
1) %\\% of brain used by average linden
: which means search for any character sequence, followed by a
backslash, followed by any sequence, followed by ' of
brain...'
2) %%% of brain used by average linden
: which (I believe) means search for a % followed by any
character sequence followed by 'of brain...'
Neither of which is what we want!
So, we need a vendor (or extention) for LIKE_STRING. Anyone
want to write it?"""
if isinstance(value, unicode):
utf8_value = value
else:
utf8_value = unicode(value, "utf-8")
esc_list = []
remove_chars = set(u"%_")
for glyph in utf8_value:
if glyph in remove_chars:
continue
esc_list.append(glyph.encode("utf-8"))
return esc_list
def delete(self):
    """Make this query unusable by deleting all the members and
    setting the deleted member.  This is desired when the on-disk
    query has been deleted but the in-memory copy remains.

    Fixed for Python 3: deleting keys while iterating __dict__.keys()
    raises "dictionary changed size during iteration"; use clear().
    """
    # blow away all members except _name, _location, and deleted
    name, location = self._name, self._location
    self.__dict__.clear()
    self.deleted = True
    self._name, self._location = name, location
def ttl(self):
    """ Estimated time to live of this query. Used for web
    services to set the Expires header."""
    # _ttl is set elsewhere on the instance; presumably seconds,
    # matching Expires-header usage -- TODO confirm at the load site
    return self._ttl
def legacy_dbname(self):
    """Return the legacy database name configured for this query."""
    return self._legacy_dbname
def return_as_map(self):
    """ Returns true if this query is configured to return its
    results as a single map (as opposed to a list of maps, the
    normal behavior)."""
    # used as a boolean flag by run() to force expect_rows == 1
    return self._return_as_map
def for_schema(self, db_name):
    """Look through the alternates and return the correct query.

    Falls back to self when db_name is None or has no alternative.
    (Rewritten with dict.get: the old 'except KeyError, e' form is a
    SyntaxError on Python 3 and bound an unused variable.)
    """
    if db_name is None:
        return self
    return self._alternative.get(db_name, self)
def run(self, connection, params, expect_rows = None, use_dictcursor = True):
    """Given a connection, run this named query with the params.

    Note that this function will fetch ALL rows. We do this because it
    opens and closes the cursor to generate the values, and this
    isn't a generator so the cursor has no life beyond the method call.

    @param connection The connection to use (this generates its own cursor for the query)
    @param params The parameters passed into the query
    @param expect_rows The number of rows expected. Set to 1 if return_as_map is true. Raises ExpectationFailed if the number of returned rows doesn't exactly match. Kind of a hack.
    @param use_dictcursor Set to false to use a normal cursor and manually convert the rows to dicts.
    @return Returns the result set as a list of dicts, or, if the named query has return_as_map set to true, returns a single dict.
    """
    if use_dictcursor:
        cursor = connection.cursor(MySQLdb.cursors.DictCursor)
    else:
        cursor = connection.cursor()
    full_query, params = self._construct_sql(params)
    if DEBUG:
        # py3: print is a function
        print("SQL:", self.sql(connection, params))
    rows = cursor.execute(full_query, params)
    # *NOTE: the expect_rows argument is a very cheesy way to get some
    # validation on the result set.  If you want to add more expectation
    # logic, do something more object-oriented and flexible.  Or use an ORM.
    if self._return_as_map:
        expect_rows = 1
    if expect_rows is not None and rows != expect_rows:
        cursor.close()
        raise ExpectationFailed("Statement expected %s rows, got %s. Sql: '%s' %s" % (
            expect_rows, rows, full_query, params))
    # convert to dicts manually if we're not using a dictcursor
    if use_dictcursor:
        result_set = cursor.fetchall()
    else:
        if cursor.description is None:
            # an insert or something -- no rows to describe
            x = cursor.fetchall()
            cursor.close()
            return x
        names = [col[0] for col in cursor.description]
        result_set = [dict(zip(names, row)) for row in cursor.fetchall()]
    cursor.close()
    if self._return_as_map:
        return result_set[0]
    return result_set
def _construct_sql(self, params):
    """ Returns a query string and a dictionary of parameters,
    suitable for directly passing to the execute() method."""
    self.refresh()
    # Assemble the query text: base query, any option clauses that the
    # supplied params activate, then the suffix.
    clauses = [self._base_query]
    for opt, extra_where in self._options.items():
        if type(extra_where) in (dict, list, tuple):
            # keyed alternatives: the param value selects the clause
            if opt in params:
                clauses.append(extra_where[params[opt]])
        elif opt in params and params[opt]:
            # simple clause: include it when the param is truthy
            clauses.append(extra_where)
    if self._query_suffix:
        clauses.append(self._query_suffix)
    full_query = '\n'.join(clauses)
    # Rewrite every placeholder using the @:name (array expansion) syntax.
    rewriter = _RewriteQueryForArray(params)
    at_expr = re.compile(r"@%\(([a-zA-Z][a-zA-Z0-9_-]*)\)s")
    full_query = at_expr.sub(rewriter.operate, full_query)
    params.update(rewriter.new_params)
    # Build out the params for LIKE, but only for parameters which were
    # detected to have used the LIKE syntax during load:
    #   * treat the incoming string as utf-8
    #   * strip wildcards
    #   * append or prepend % as appropriate
    extra_params = {}
    for key in params:
        if key in self._around:
            stripped = ''.join(self._strip_wildcards_to_list(params[key]))
            extra_params[self._build_around_key(key)] = '%' + stripped + '%'
        if key in self._append:
            stripped = ''.join(self._strip_wildcards_to_list(params[key]))
            extra_params[self._build_append_key(key)] = stripped + '%'
        if key in self._integer:
            extra_params[self._build_integer_key(key)] = int(params[key])
    params.update(extra_params)
    return full_query, params
def sql(self, connection, params):
    """ Generates an SQL statement from the named query document
    and a dictionary of parameters.

    *NOTE: Only use for debugging, because it uses the
    non-standard MySQLdb 'literal' method.
    """
    if not DEBUG:
        import warnings
        warnings.warn("Don't use named_query.sql() when not debugging. Used on %s" % self._location)
    # Escape and substitute via the mysql (non-standard) 'literal' call.
    query_text, query_params = self._construct_sql(params)
    return query_text % connection.literal(query_params)
def refresh(self):
    """ Refresh self from the file on the filesystem.

    This is optimized to be callable as frequently as you wish,
    without adding too much load.  It does so by only stat-ing the
    file every N seconds, where N defaults to 5 and is
    configurable through the member _stat_interval_seconds.  If the stat
    reveals that the file has changed, refresh will re-parse the
    contents of the file and use them to update the named query
    instance.  If the stat reveals that the file has been deleted,
    refresh will call self.delete to make the in-memory
    representation unusable.

    Fixed for Python 3: 'except OSError, e' is a SyntaxError; use 'as'.
    """
    now = time.time()
    if now - self._last_check_time > self._stat_interval_seconds:
        self._last_check_time = now
        try:
            modtime = self.get_modtime()
            if modtime > self._last_mod_time:
                self.load_contents()
        except OSError as e:
            if e.errno == errno.ENOENT:  # file not found
                self.delete()  # clean up self
            raise  # pass the exception along to the caller so they know that this query disappeared
class NamedQueryManager(object):
    """ Manages the lifespan of NamedQuery objects, drawing from a
    directory hierarchy of named query documents.

    In practice this amounts to a memory cache of NamedQuery objects."""

    def __init__(self, named_queries_dir):
        """ Initializes a manager to look for named queries in a
        directory."""
        self._dir = os.path.abspath(os.path.realpath(named_queries_dir))
        self._cached_queries = {}  # name -> NamedQuery instance

    def sql(self, connection, name, params):
        """Render the named query's SQL for debugging (see NamedQuery.sql)."""
        nq = self.get(name)
        return nq.sql(connection, params)

    def get(self, name):
        """ Returns a NamedQuery instance based on the name, either
        from memory cache, or by parsing from disk.

        The name is simply a relative path to the directory associated
        with the manager object.  Before returning the instance, the
        NamedQuery object is cached in memory, so that subsequent
        accesses don't have to read from disk or do any parsing.  This
        means that NamedQuery objects returned by this method are
        shared across all users of the manager object.
        NamedQuery.refresh is used to bring the NamedQuery objects in
        sync with the actual files on disk.

        Fixed for Python 3: 'except OSError, e' is a SyntaxError; use 'as'.
        """
        nq = self._cached_queries.get(name)
        if nq is None:
            nq = NamedQuery(name, os.path.join(self._dir, name))
            self._cached_queries[name] = nq
        else:
            try:
                nq.refresh()
            except OSError as e:
                if e.errno == errno.ENOENT:  # file not found
                    del self._cached_queries[name]
                raise  # pass exception along to caller so they know that the query disappeared
        return nq
class _RewriteQueryForArray(object):
"Helper class for rewriting queries with the @:name syntax"
def __init__(self, params):
self.params = params
self.new_params = dict()
def operate(self, match):
"Given a match, return the string that should be in use"
key = match.group(1)
value = self.params[key]
if type(value) in (list,tuple):
rv = []
for idx in range(len(value)):
# if the value@idx is array-like, we are
# probably dealing with a VALUES
new_key = "_%s_%s"%(key, str(idx))
val_item = value[idx]
if type(val_item) in (list, tuple, dict):
if type(val_item) is dict:
# this is because in Python, the order of
# key, value retrieval from the dict is not
# guaranteed to match what the input intended
# and for VALUES, order is important.
# TODO: Implemented ordered dict in LLSD parser?
raise ExpectationFailed('Only lists/tuples allowed,\
received dict')
values_keys = []
for value_idx, item in enumerate(val_item):
# we want a key of the format :
# key_#replacement_#value_row_#value_col
# ugh... so if we are replacing 10 rows in user_note,
# the first values clause would read (for @:user_notes) :-
# ( :_user_notes_0_1_1, :_user_notes_0_1_2, :_user_notes_0_1_3 )
# the input LLSD for VALUES will look like:
# <llsd>...
# <map>
# <key>user_notes</key>
# <array>
# <array> <!-- row 1 for VALUES -->
# <string>...</string>
# <string>...</string>
# <string>...</string>
# </array>
# ...
# </array>
# </map>
# ... </llsd>
values_key = "%s_%s"%(new_key, value_idx)
self.new_params[values_key] = item
values_keys.append("%%(%s)s"%values_key)
# now collapse all these new place holders enclosed in ()
# from [':_key_0_1_1', ':_key_0_1_2', ':_key_0_1_3,...]
# rv will have [ '(:_key_0_1_1, :_key_0_1_2, :_key_0_1_3)', ]
# which is flattened a few lines below join(rv)
rv.append('(%s)' % ','.join(values_keys))
else:
self.new_params[new_key] = val_item
rv.append("%%(%s)s"%new_key)
return ','.join(rv)
else:
# not something that can be expanded, so just drop the
# leading @ in the front of the match. This will mean that
# the single value we have, be it a string, int, whatever
# (other than dict) will correctly show up, eg:
#
# where foo in (@:foobar) -- foobar is a string, so we get
# where foo in (:foobar)
return match.group(0)[1:]

View File

@@ -1,84 +0,0 @@
'''
@file shutil2.py
@brief a better shutil.copytree replacement
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
'''
#
# shutil2.py
# Taken from http://www.scons.org/wiki/AccumulateBuilder
# the stock copytree sucks because it insists that the
# target dir not exist
#
import os.path
import shutil
def copytree(src, dest, symlinks=False):
    """Recursively copy a directory tree using copy2(), tolerating an
    existing destination (unlike the stock shutil.copytree).

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    Behavior is meant to be identical to GNU 'cp -R'.
    """
    def copyItems(src, dest, symlinks=False):
        """Copy the entries of directory src into existing directory dest.

        It is necessary to handle the two 'cp' cases:
        - destination does exist
        - destination does not exist
        See 'cp -R' documentation for more details.
        """
        for item in os.listdir(src):
            srcPath = os.path.join(src, item)
            # Check links before directories: a symlink to a directory is
            # also a directory.  (The original tested os.path.islink(item),
            # a cwd-relative name, so link handling effectively never ran,
            # and it created the link at 'dest' itself instead of per-item.)
            if symlinks and os.path.islink(srcPath):
                linkto = os.readlink(srcPath)
                os.symlink(linkto, os.path.join(dest, item))
            elif os.path.isdir(srcPath):
                srcBasename = os.path.basename(srcPath)
                destDirPath = os.path.join(dest, srcBasename)
                if not os.path.exists(destDirPath):
                    os.makedirs(destDirPath)
                # propagate the symlinks flag (previously dropped)
                copyItems(srcPath, destDirPath, symlinks)
            else:
                shutil.copy2(srcPath, dest)

    # case 'cp -R src/ dest/' where dest/ already exists
    if os.path.exists(dest):
        destPath = os.path.join(dest, os.path.basename(src))
        if not os.path.exists(destPath):
            os.makedirs(destPath)
    # case 'cp -R src/ dest/' where dest/ does not exist
    else:
        os.makedirs(dest)
        destPath = dest
    # actually copy the files
    copyItems(src, destPath, symlinks)

View File

@@ -1,338 +0,0 @@
#!/usr/bin/env python
"""\
@file simperf_host_xml_parser.py
@brief Digest collector's XML dump and convert to simple dict/list structure
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import sys, os, getopt, time
import simplejson
from xml import sax
def usage():
    """Print command-line usage for this script to stdout.

    Converted from Python 2 print statements to the print() function.
    """
    print("Usage:")
    print(sys.argv[0] + " [options]")
    print(" Convert RRD's XML dump to JSON. Script to convert the simperf_host_collector-")
    print(" generated RRD dump into JSON. Steps include converting selected named")
    print(" fields from GAUGE type to COUNTER type by computing delta with preceding")
    print(" values. Top-level named fields are:")
    print()
    print(" lastupdate Time (javascript timestamp) of last data sample")
    print(" step Time in seconds between samples")
    print(" ds Data specification (name/type) for each column")
    print(" database Table of data samples, one time step per row")
    print()
    print("Options:")
    print(" -i, --in Input settings filename. (Default: stdin)")
    print(" -o, --out Output settings filename. (Default: stdout)")
    print(" -h, --help Print this message and exit.")
    print()
    print("Example: %s -i rrddump.xml -o rrddump.json" % sys.argv[0])
    print()
    print("Interfaces:")
    print(" class SimPerfHostXMLParser() # SAX content handler")
    print(" def simperf_host_xml_fixup(parser) # post-parse value fixup")
class SimPerfHostXMLParser(sax.handler.ContentHandler):
    """SAX content handler extracting the useful parts of 'rrdtool dump' XML.

    After a parse, the public attributes hold:
      rrd_last_update -- value of <lastupdate> as an int
      rrd_step        -- value of <step> as an int
      rrd_ds          -- list of {"name": ..., "type": ...} dicts, one per <ds>
      rrd_records     -- list of rows (lists of value strings); rows that
                         contained a "NaN" sample are dropped

    Fixed for Python 3: long() no longer exists; use int().
    """

    def __init__(self):
        pass

    def startDocument(self):
        # public results
        self.rrd_last_update = 0                       # public
        self.rrd_step = 0                              # public
        self.rrd_ds = []                               # public
        self.rrd_records = []                          # public
        # internal parse state
        self._rrd_level = 0
        self._rrd_parse_state = 0
        self._rrd_chars = ""
        self._rrd_capture = False
        self._rrd_ds_val = {}
        self._rrd_data_row = []
        self._rrd_data_row_has_nan = False

    def endDocument(self):
        pass

    # Nasty little ad-hoc state machine to extract the elements that are
    # necessary from the 'rrdtool dump' XML output.  The same element
    # name '<ds>' is used for two different data sets so we need to pay
    # some attention to the actual structure to get the ones we want
    # and ignore the ones we don't.
    def startElement(self, name, attrs):
        self._rrd_level = self._rrd_level + 1
        self._rrd_capture = False
        if self._rrd_level == 1:
            if name == "rrd" and self._rrd_parse_state == 0:
                self._rrd_parse_state = 1      # In <rrd>
                self._rrd_capture = True
                self._rrd_chars = ""
        elif self._rrd_level == 2:
            if self._rrd_parse_state == 1:
                if name == "lastupdate":
                    self._rrd_parse_state = 2  # In <rrd><lastupdate>
                    self._rrd_capture = True
                    self._rrd_chars = ""
                elif name == "step":
                    self._rrd_parse_state = 3  # In <rrd><step>
                    self._rrd_capture = True
                    self._rrd_chars = ""
                elif name == "ds":
                    self._rrd_parse_state = 4  # In <rrd><ds>
                    self._rrd_ds_val = {}
                    self._rrd_chars = ""
                elif name == "rra":
                    self._rrd_parse_state = 5  # In <rrd><rra>
        elif self._rrd_level == 3:
            if self._rrd_parse_state == 4:
                if name == "name":
                    self._rrd_parse_state = 6  # In <rrd><ds><name>
                    self._rrd_capture = True
                    self._rrd_chars = ""
                elif name == "type":
                    self._rrd_parse_state = 7  # In <rrd><ds><type>
                    self._rrd_capture = True
                    self._rrd_chars = ""
            elif self._rrd_parse_state == 5:
                if name == "database":
                    self._rrd_parse_state = 8  # In <rrd><rra><database>
        elif self._rrd_level == 4:
            if self._rrd_parse_state == 8:
                if name == "row":
                    self._rrd_parse_state = 9  # In <rrd><rra><database><row>
                    self._rrd_data_row = []
                    self._rrd_data_row_has_nan = False
        elif self._rrd_level == 5:
            if self._rrd_parse_state == 9:
                if name == "v":
                    self._rrd_parse_state = 10 # In <rrd><rra><database><row><v>
                    self._rrd_capture = True
                    self._rrd_chars = ""

    def endElement(self, name):
        self._rrd_capture = False
        if self._rrd_parse_state == 10:
            self._rrd_capture = self._rrd_level == 6
            if self._rrd_level == 5:
                if self._rrd_chars == "NaN":
                    self._rrd_data_row_has_nan = True
                else:
                    self._rrd_data_row.append(self._rrd_chars)
                self._rrd_parse_state = 9      # In <rrd><rra><database><row>
        elif self._rrd_parse_state == 9:
            if self._rrd_level == 4:
                if not self._rrd_data_row_has_nan:
                    self.rrd_records.append(self._rrd_data_row)
                self._rrd_parse_state = 8      # In <rrd><rra><database>
        elif self._rrd_parse_state == 8:
            if self._rrd_level == 3:
                self._rrd_parse_state = 5      # In <rrd><rra>
        elif self._rrd_parse_state == 7:
            if self._rrd_level == 3:
                self._rrd_ds_val["type"] = self._rrd_chars
                self._rrd_parse_state = 4      # In <rrd><ds>
        elif self._rrd_parse_state == 6:
            if self._rrd_level == 3:
                self._rrd_ds_val["name"] = self._rrd_chars
                self._rrd_parse_state = 4      # In <rrd><ds>
        elif self._rrd_parse_state == 5:
            if self._rrd_level == 2:
                self._rrd_parse_state = 1      # In <rrd>
        elif self._rrd_parse_state == 4:
            if self._rrd_level == 2:
                self.rrd_ds.append(self._rrd_ds_val)
                self._rrd_parse_state = 1      # In <rrd>
        elif self._rrd_parse_state == 3:
            if self._rrd_level == 2:
                self.rrd_step = int(self._rrd_chars)         # py3: was long()
                self._rrd_parse_state = 1      # In <rrd>
        elif self._rrd_parse_state == 2:
            if self._rrd_level == 2:
                self.rrd_last_update = int(self._rrd_chars)  # py3: was long()
                self._rrd_parse_state = 1      # In <rrd>
        elif self._rrd_parse_state == 1:
            if self._rrd_level == 1:
                self._rrd_parse_state = 0      # At top
        if self._rrd_level:
            self._rrd_level = self._rrd_level - 1

    def characters(self, content):
        if self._rrd_capture:
            self._rrd_chars = self._rrd_chars + content.strip()
def _make_numeric(value):
try:
value = float(value)
except:
value = ""
return value
def simperf_host_xml_fixup(parser, filter_start_time = None, filter_end_time = None):
    """Post-process a parsed RRD dump in place on *parser*.

    Converts known counter-like GAUGE columns into per-interval deltas,
    blanks filler rows, prepends a javascript-style (ms) timestamp to
    every row and to parser.rrd_ds, and optionally drops rows outside
    [filter_start_time, filter_end_time] (both in ms, presumably --
    matching the *1000 scaling below; TODO confirm with callers).
    Mutates parser.rrd_records and parser.rrd_ds; returns None.
    """
    # Fixup for GAUGE fields that are really COUNTS. They
    # were forced to GAUGE to try to disable rrdtool's
    # data interpolation/extrapolation for non-uniform time
    # samples.
    fixup_tags = [ "cpu_user",
                   "cpu_nice",
                   "cpu_sys",
                   "cpu_idle",
                   "cpu_waitio",
                   "cpu_intr",
                   # "file_active",
                   # "file_free",
                   # "inode_active",
                   # "inode_free",
                   "netif_in_kb",
                   "netif_in_pkts",
                   "netif_in_errs",
                   "netif_in_drop",
                   "netif_out_kb",
                   "netif_out_pkts",
                   "netif_out_errs",
                   "netif_out_drop",
                   "vm_page_in",
                   "vm_page_out",
                   "vm_swap_in",
                   "vm_swap_out",
                   #"vm_mem_total",
                   #"vm_mem_used",
                   #"vm_mem_active",
                   #"vm_mem_inactive",
                   #"vm_mem_free",
                   #"vm_mem_buffer",
                   #"vm_swap_cache",
                   #"vm_swap_total",
                   #"vm_swap_used",
                   #"vm_swap_free",
                   "cpu_interrupts",
                   "cpu_switches",
                   "cpu_forks" ]
    col_count = len(parser.rrd_ds)
    row_count = len(parser.rrd_records)
    # Process the last row separately, just to make all values numeric.
    for j in range(col_count):
        parser.rrd_records[row_count - 1][j] = _make_numeric(parser.rrd_records[row_count - 1][j])
    # Process all other row/columns, scanning backwards from the end.
    last_different_row = row_count - 1
    current_row = row_count - 2
    while current_row >= 0:
        # Check for a different value than the previous row.  If everything is the same
        # then this is probably just a filler/bogus entry.
        is_different = False
        for j in range(col_count):
            parser.rrd_records[current_row][j] = _make_numeric(parser.rrd_records[current_row][j])
            if parser.rrd_records[current_row][j] != parser.rrd_records[last_different_row][j]:
                # We're good.  This is a different row.
                is_different = True
        if not is_different:
            # This is a filler/bogus entry.  Just ignore it.
            for j in range(col_count):
                parser.rrd_records[current_row][j] = float('nan')
        else:
            # Some tags need to be converted into deltas
            # (value at the later row minus value at this row).
            for j in range(col_count):
                if parser.rrd_ds[j]["name"] in fixup_tags:
                    parser.rrd_records[last_different_row][j] = \
                        parser.rrd_records[last_different_row][j] - parser.rrd_records[current_row][j]
            last_different_row = current_row
        current_row -= 1
    # Set fixup_tags in the first row to 'nan' since they aren't useful anymore.
    for j in range(col_count):
        if parser.rrd_ds[j]["name"] in fixup_tags:
            parser.rrd_records[0][j] = float('nan')
    # Add a timestamp to each row and to the catalog.  Format and name
    # chosen to match other simulator logging (hopefully).
    start_time = parser.rrd_last_update - (parser.rrd_step * (row_count - 1))
    # Build a filtered list of rrd_records if we are limited to a time range.
    filter_records = False
    if filter_start_time is not None or filter_end_time is not None:
        filter_records = True
        filtered_rrd_records = []
        if filter_start_time is None:
            filter_start_time = start_time * 1000
        if filter_end_time is None:
            filter_end_time = parser.rrd_last_update * 1000
    for i in range(row_count):
        # timestamps are milliseconds (javascript convention)
        record_timestamp = (start_time + (i * parser.rrd_step)) * 1000
        parser.rrd_records[i].insert(0, record_timestamp)
        if filter_records:
            if filter_start_time <= record_timestamp and record_timestamp <= filter_end_time:
                filtered_rrd_records.append(parser.rrd_records[i])
    if filter_records:
        parser.rrd_records = filtered_rrd_records
    parser.rrd_ds.insert(0, {"type": "GAUGE", "name": "javascript_timestamp"})
def main(argv=None):
    """Entry point: read 'rrdtool dump' XML (stdin or -i), emit JSON (stdout or -o).

    Fixed for Python 3: 'print >>file' is a SyntaxError; use print(..., file=...).
    """
    opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
    input_file = sys.stdin
    output_file = sys.stdout
    for o, a in opts:
        if o in ("-i", "--in"):
            input_file = open(a, 'r')
        if o in ("-o", "--out"):
            output_file = open(a, 'w')
        if o in ("-h", "--help"):
            usage()
            sys.exit(0)
    # Using the SAX parser as it is at least 4X faster and far, far
    # smaller on this dataset than the DOM-based interface in xml.dom.minidom.
    # With SAX and a 5.4MB xml file, this requires about seven seconds of
    # wall-clock time and 32MB VSZ.  With the DOM interface, about 22 seconds
    # and over 270MB VSZ.
    handler = SimPerfHostXMLParser()
    sax.parse(input_file, handler)
    if input_file != sys.stdin:
        input_file.close()
    # Various format fixups: string-to-num, gauge-to-counts, add
    # a time stamp, etc.
    simperf_host_xml_fixup(handler)
    # Create JSONable dict with interesting data and format/print it
    print(simplejson.dumps({ "step" : handler.rrd_step,
                             "lastupdate": handler.rrd_last_update * 1000,
                             "ds" : handler.rrd_ds,
                             "database" : handler.rrd_records }),
          file=output_file)
    return 0

if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,167 +0,0 @@
#!/usr/bin/env python
"""\
@file simperf_oprof_interface.py
@brief Manage OProfile data collection on a host
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import sys, os, getopt
import simplejson
def usage():
    """Print command-line usage for this script to stdout.

    Converted from Python 2 print statements to the print() function.
    """
    print("Usage:")
    print(sys.argv[0] + " [options]")
    print(" Digest the OProfile report forms that come out of the")
    print(" simperf_oprof_ctl program's -r/--report command. The result")
    print(" is an array of dictionaires with the following keys:")
    print()
    print(" symbol Name of sampled, calling, or called procedure")
    print(" file Executable or library where symbol resides")
    print(" percentage Percentage contribution to profile, calls or called")
    print(" samples Sample count")
    print(" calls Methods called by the method in question (full only)")
    print(" called_by Methods calling the method (full only)")
    print()
    print(" For 'full' reports the two keys 'calls' and 'called_by' are")
    print(" themselves arrays of dictionaries based on the first four keys.")
    print()
    print("Return Codes:")
    print(" None. Aggressively digests everything. Will likely mung results")
    print(" if a program or library has whitespace in its name.")
    print()
    print("Options:")
    print(" -i, --in Input settings filename. (Default: stdin)")
    print(" -o, --out Output settings filename. (Default: stdout)")
    print(" -h, --help Print this message and exit.")
    print()
    print("Interfaces:")
    print(" class SimPerfOProfileInterface()")
class SimPerfOProfileInterface:
    """Digest OProfile report text into a list of sample dictionaries.

    Public attributes after parse():
      isBrief -- True when the report had no '------' callgraph separators
      isValid -- True once a report body has been consumed
      result  -- list of {"samples", "percentage", "file", "symbol"} dicts;
                 full reports add "calls" and "called_by" sub-lists
    """

    def __init__(self):
        self.isBrief = True    # public
        self.isValid = False   # public
        self.result = []       # public

    def parse(self, input):
        """Scan input lines for the 'samples' header, then hand off to the
        brief or full (callgraph) sub-parser."""
        seen_header = False
        for line in input:
            if seen_header:
                if line.startswith("------"):
                    self.isBrief = False
                    self._parseFull(input)
                else:
                    self._parseBrief(input, line)
                self.isValid = True
                return
            try:
                first_word, _ = line.split(None, 1)
            except ValueError:
                continue
            if first_word == "samples":
                seen_header = True

    def _entry(self, line):
        """Split one report line into a sample dict, or None if malformed."""
        try:
            count, pct, image, sym = line.split(None, 3)
        except ValueError:
            return None
        return {"samples" : count,
                "percentage" : pct,
                "file" : image,
                "symbol" : sym.strip("\n")}

    def _parseBrief(self, input, line1):
        """Parse the flat (no callgraph) report format, starting at line1."""
        entry = self._entry(line1)
        if entry is not None:
            self.result.append(entry)
        for line in input:
            entry = self._entry(line)
            if entry is not None:
                self.result.append(entry)

    def _parseFull(self, input):
        """Parse the callgraph report format: '------'-separated sections of
        callers, the sampled symbol (unindented), then callees."""
        state = 0            # 0: in 'called_by' section, 1: in 'calls' section
        calls = []
        called_by = []
        current = {}
        for line in input:
            if line.startswith("------"):
                # section boundary: flush the completed record
                if current:
                    current["calls"] = calls
                    current["called_by"] = called_by
                    self.result.append(current)
                state = 0
                calls = []
                called_by = []
                current = {}
                continue
            entry = self._entry(line)
            if entry is None:
                continue
            if not line.startswith(" "):
                # unindented line: the sampled symbol itself
                current = entry
                state = 1
            elif state == 0:
                called_by.append(entry)
            else:
                calls.append(entry)
        if current:
            current["calls"] = calls
            current["called_by"] = called_by
            self.result.append(current)
def main(argv=None):
    """Entry point: parse an OProfile report (stdin or -i), emit JSON (stdout or -o).

    Fixed for Python 3: 'print >>file' is a SyntaxError; use print(..., file=...).
    """
    opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"])
    input_file = sys.stdin
    output_file = sys.stdout
    for o, a in opts:
        if o in ("-i", "--in"):
            input_file = open(a, 'r')
        if o in ("-o", "--out"):
            output_file = open(a, 'w')
        if o in ("-h", "--help"):
            usage()
            sys.exit(0)
    oprof = SimPerfOProfileInterface()
    oprof.parse(input_file)
    if input_file != sys.stdin:
        input_file.close()
    # Create JSONable dict with interesting data and format/print it
    print(simplejson.dumps(oprof.result), file=output_file)
    return 0

if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,191 +0,0 @@
#!/usr/bin/env python
"""\
@file simperf_proc_interface.py
@brief Utility to extract log messages from *.<pid>.llsd files containing performance statistics.
$LicenseInfo:firstyear=2008&license=mit$
Copyright (c) 2008-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
# ----------------------------------------------------
# Utility to extract log messages from *.<pid>.llsd
# files that contain performance statistics.
# ----------------------------------------------------
import sys, os
# setup-path.py, when present, adjusts sys.path for a source checkout.
# execfile() was removed in Python 3; exec the compiled source instead.
if os.path.exists("setup-path.py"):
    with open("setup-path.py") as _setup_path_file:
        exec(compile(_setup_path_file.read(), "setup-path.py", 'exec'))
from indra.base import llsd
# Default location of the simperf *.llsd stat dumps read by this module.
DEFAULT_PATH="/dev/shm/simperf/"
# ----------------------------------------------------
# Pull out the stats and return a single document
def parse_logfile(filename, target_column=None, verbose=False):
    """Pull the stats out of one log file and return a single document.

    Each line of the file is parsed with llsd.parse; when target_column is
    given, only that column (plus 'fps', '/total_time' and 'utc_time') is
    kept per frame.  Raises on I/O errors (exceptions percolate up).

    Fixed for Python 3: file.xreadlines() and the print statement are gone;
    iterate the file object directly and use print().
    """
    full_doc = []
    # Open source temp log file.  Let exceptions percolate up.
    with open(filename, 'r') as sourcefile:
        if verbose:
            print("Reading " + filename)
        # Parse and output all lines from the temp file
        for line in sourcefile:
            partial_doc = llsd.parse(line)
            if partial_doc is not None:
                if target_column is None:
                    full_doc.append(partial_doc)
                else:
                    trim_doc = { target_column: partial_doc[target_column] }
                    if target_column != "fps":
                        trim_doc[ 'fps' ] = partial_doc[ 'fps' ]
                    trim_doc[ '/total_time' ] = partial_doc[ '/total_time' ]
                    trim_doc[ 'utc_time' ] = partial_doc[ 'utc_time' ]
                    full_doc.append(trim_doc)
    return full_doc
# Extract just the meta info line, and the timestamp of the first/last frame entry.
def parse_logfile_info(filename, verbose=False):
    """Read only the metadata of a *.<pid>.llsd log file.

    Returns the parsed meta-info document (the file's first line) with
    'start_time' and 'end_time' keys added holding the first/last frame
    timestamps (None when a timestamp can't be determined), or None when
    the file is empty.  Open/read exceptions percolate up.
    """
    # Open source temp log file.  Let exceptions percolate up.
    # 'U' opens with Universal newline support.
    sourcefile = open(filename, 'rU')
    try:
        if verbose:
            print("Reading " + filename)
        # The first line is the meta info line.
        info_line = sourcefile.readline()
        if not info_line:
            return None
        # The rest of the lines are frames.  Read the first and last to
        # get the time range.
        info = llsd.parse(info_line)
        info['start_time'] = None
        info['end_time'] = None
        first_frame = sourcefile.readline()
        if first_frame:
            try:
                info['start_time'] = int(llsd.parse(first_frame)['timestamp'])
            except Exception:
                # Unparseable first frame; leave start_time as None.
                pass
        # Read the file backwards to find the last complete line.
        sourcefile.seek(0, 2)
        file_size = sourcefile.tell()
        offset = 1024
        end_time = None
        while 1:
            # Clamp every iteration: the original only clamped before the
            # loop, so 'offset += 1024' could seek past the start of the
            # file and raise IOError.
            if offset > file_size:
                offset = file_size
            sourcefile.seek(-1 * offset, 2)
            read_str = sourcefile.read(offset)
            # Remove newline at the end
            if read_str and read_str[-1] == '\n':
                read_str = read_str[0:-1]
            lines = read_str.split('\n')
            if len(lines) > 2:  # Got at least two complete lines
                try:
                    end_time = llsd.parse(lines[-1])['timestamp']
                except Exception:
                    # We couldn't parse this line.  Try once more.
                    try:
                        end_time = llsd.parse(lines[-2])['timestamp']
                    except Exception:
                        # Nope.  Just move on.
                        pass
                break
            if len(read_str) == file_size:  # Reached the beginning
                break
            offset += 1024
        # The original called int(end_time) unconditionally and crashed
        # with TypeError when no frame line could be parsed.
        if end_time is not None:
            info['end_time'] = int(end_time)
        return info
    finally:
        sourcefile.close()
def parse_proc_filename(filename):
    """Split a '<type>_proc.<pid>.llsd' filename into (pid, stat_type).

    Returns (None, None) when the filename doesn't match the expected
    dotted pattern.
    """
    try:
        name_as_list = filename.split(".")
        cur_stat_type = name_as_list[0].split("_")[0]
        cur_pid = name_as_list[1]
    except (IndexError, ValueError):
        # BUG FIX: the original 'except IndexError, ValueError:' bound the
        # IndexError instance to the name ValueError instead of catching
        # ValueError; the tuple form catches both.
        return (None, None)
    return (cur_pid, cur_stat_type)
# ----------------------------------------------------
def get_simstats_list(path=None):
    """Return the meta-info document of each <type>_proc.<pid>.llsd file.

    Scans 'path' (DEFAULT_PATH when None), skipping the config file, and
    collects the non-None results of parse_logfile_info().
    """
    if path is None:
        path = DEFAULT_PATH
    simstats_list = []
    for file_name in os.listdir(path):
        if file_name.endswith(".llsd") and file_name != "simperf_proc_config.llsd":
            # os.path.join instead of naked concatenation so a path
            # without a trailing separator still works.
            simstats_info = parse_logfile_info(os.path.join(path, file_name))
            if simstats_info is not None:
                simstats_list.append(simstats_info)
    return simstats_list
def get_log_info_list(pid=None, stat_type=None, path=None, target_column=None, verbose=False):
    """Return data from all llsd files matching the pid and stat type.

    Returns a dict mapping pid -> parse_logfile() result for every
    <type>_proc.<pid>.llsd file in 'path' (DEFAULT_PATH when None) that
    matches the optional 'pid'/'stat_type' filters.
    """
    if path is None:
        path = DEFAULT_PATH
    log_info_list = {}
    for file_name in os.listdir(path):
        # Guard clauses: skip non-stat files and filter mismatches.
        if not file_name.endswith(".llsd") or file_name == "simperf_proc_config.llsd":
            continue
        (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
        if cur_pid is None:
            continue
        if pid is not None and pid != cur_pid:
            continue
        if stat_type is not None and stat_type != cur_stat_type:
            continue
        # Join rather than concatenate so 'path' need not end in a separator.
        log_info_list[cur_pid] = parse_logfile(os.path.join(path, file_name),
                                               target_column, verbose)
    return log_info_list
def delete_simstats_files(pid=None, stat_type=None, path=None):
    """Delete matching *.<pid>.llsd files and return the deleted pids.

    Files are matched in 'path' (DEFAULT_PATH when None) against the
    optional 'pid'/'stat_type' filters.  Delete-related exceptions
    percolate up to the caller.
    """
    if path is None:
        path = DEFAULT_PATH
    del_list = []
    for file_name in os.listdir(path):
        if not file_name.endswith(".llsd") or file_name == "simperf_proc_config.llsd":
            continue
        (cur_pid, cur_stat_type) = parse_proc_filename(file_name)
        if cur_pid is None:
            continue
        if pid is not None and pid != cur_pid:
            continue
        if stat_type is not None and stat_type != cur_stat_type:
            continue
        del_list.append(cur_pid)
        # Allow delete related exceptions to percolate up if this fails.
        # BUG FIX: the original unlinked from DEFAULT_PATH even when the
        # caller passed a different 'path' (files were listed from one
        # directory and deleted from another).
        os.unlink(os.path.join(path, file_name))
    return del_list

View File

@@ -1,222 +0,0 @@
'''
@file term.py
@brief Utilities for portable, formatted (colored) terminal output.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
'''
#http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116
import sys, re
class TerminalController:
    """
    A class that can be used to portably generate formatted output to
    a terminal.

    `TerminalController` defines a set of instance variables whose
    values are initialized to the control sequence necessary to
    perform a given action.  These can be simply included in normal
    output to the terminal:

        >>> term = TerminalController()
        >>> print 'This is '+term.GREEN+'green'+term.NORMAL

    Alternatively, the `render()` method can used, which replaces
    '${action}' with the string required to perform 'action':

        >>> term = TerminalController()
        >>> print term.render('This is ${GREEN}green${NORMAL}')

    If the terminal doesn't support a given action, then the value of
    the corresponding instance variable will be set to ''.  As a
    result, the above code will still work on terminals that do not
    support color, except that their output will not be colored.
    Also, this means that you can test whether the terminal supports a
    given action by simply testing the truth value of the
    corresponding instance variable:

        >>> term = TerminalController()
        >>> if term.CLEAR_SCREEN:
        ...     print 'This terminal supports clearing the screen.'

    Finally, if the width and height of the terminal are known, then
    they will be stored in the `COLS` and `LINES` attributes.
    """
    # Cursor movement:
    BOL = ''             #: Move the cursor to the beginning of the line
    UP = ''              #: Move the cursor up one line
    DOWN = ''            #: Move the cursor down one line
    LEFT = ''            #: Move the cursor left one char
    RIGHT = ''           #: Move the cursor right one char

    # Deletion:
    CLEAR_SCREEN = ''    #: Clear the screen and move to home position
    CLEAR_EOL = ''       #: Clear to the end of the line.
    CLEAR_BOL = ''       #: Clear to the beginning of the line.
    CLEAR_EOS = ''       #: Clear to the end of the screen

    # Output modes:
    BOLD = ''            #: Turn on bold mode
    BLINK = ''           #: Turn on blink mode
    DIM = ''             #: Turn on half-bright mode
    REVERSE = ''         #: Turn on reverse-video mode
    # BUG FIX: UNDERLINE is probed in _STRING_CAPABILITIES below but had
    # no class-level default, so term.UNDERLINE raised AttributeError on
    # dumb (non-tty / unknown) terminals.
    UNDERLINE = ''       #: Turn on underline mode
    NORMAL = ''          #: Turn off all modes

    # Cursor display:
    HIDE_CURSOR = ''     #: Make the cursor invisible
    SHOW_CURSOR = ''     #: Make the cursor visible

    # Terminal size:
    COLS = None          #: Width of the terminal (None for unknown)
    LINES = None         #: Height of the terminal (None for unknown)

    # Foreground colors:
    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''

    # Background colors:
    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''

    # Mapping of attribute name -> terminfo string capability name.
    # BUG FIX: the capability that hides the cursor is 'civis'
    # (cursor_invisible), not 'cinvis'; with the typo HIDE_CURSOR was
    # never populated on any terminal.
    _STRING_CAPABILITIES = """
    BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
    CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
    BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
    HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
    _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
    _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()

    def __init__(self, term_stream=sys.stdout):
        """
        Create a `TerminalController` and initialize its attributes
        with appropriate values for the current terminal.
        `term_stream` is the stream that will be used for terminal
        output; if this stream is not a tty, then the terminal is
        assumed to be a dumb terminal (i.e., have no capabilities).
        """
        # Curses isn't available on all platforms
        try: import curses
        except: return

        # If the stream isn't a tty, then assume it has no capabilities.
        if not term_stream.isatty(): return

        # Check the terminal type.  If we fail, then assume that the
        # terminal has no capabilities.
        try: curses.setupterm()
        except: return

        # Look up numeric capabilities.
        self.COLS = curses.tigetnum('cols')
        self.LINES = curses.tigetnum('lines')

        # Look up string capabilities.
        for capability in self._STRING_CAPABILITIES:
            (attrib, cap_name) = capability.split('=')
            setattr(self, attrib, self._tigetstr(cap_name) or '')

        # Colors: try the legacy setf/setb capabilities first, then the
        # ANSI setaf/setab ones (which use a different color ordering).
        set_fg = self._tigetstr('setf')
        if set_fg:
            for i,color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, color, curses.tparm(set_fg, i) or '')
        set_fg_ansi = self._tigetstr('setaf')
        if set_fg_ansi:
            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
        set_bg = self._tigetstr('setb')
        if set_bg:
            for i,color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
        set_bg_ansi = self._tigetstr('setab')
        if set_bg_ansi:
            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')

    def _tigetstr(self, cap_name):
        # String capabilities can include "delays" of the form "$<2>".
        # For any modern terminal, we should be able to just ignore
        # these, so strip them out.
        import curses
        cap = curses.tigetstr(cap_name) or ''
        return re.sub(r'\$<\d+>[/*]?', '', cap)

    def render(self, template):
        """
        Replace each $-substitutions in the given template string with
        the corresponding terminal control string (if it's defined) or
        '' (if it's not).  A literal '$$' is left unchanged.
        """
        return re.sub(r'\$\$|\${\w+}', self._render_sub, template)

    def _render_sub(self, match):
        # Substitution callback for render(): pass '$$' through,
        # otherwise look up the named capability attribute.
        s = match.group()
        if s == '$$': return s
        else: return getattr(self, s[2:-1])
#######################################################################
# Example use case: progress bar
#######################################################################

class ProgressBar:
    """
    A 3-line progress bar, which looks like::

        Header
        20% [===========----------------------------------]
        progress message

    The progress bar is colored, if the terminal supports color
    output; and adjusts to the width of the terminal.
    """
    # Bar-line template: percentage, '=' done segment, '-' remaining segment.
    BAR = '%3d%% ${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}\n'
    # Centered header-line template.
    HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'

    def __init__(self, term, header):
        """
        term   -- a TerminalController (or compatible) object supplying
                  capability strings and a render() method.
        header -- title text centered above the bar.

        Raises ValueError when the terminal cannot clear and move lines,
        which this display needs in order to redraw in place.
        """
        self.term = term
        if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
            # Typo fix: the original message read "progress dispaly".
            raise ValueError("Terminal isn't capable enough -- you "
                             "should use a simpler progress display.")
        self.width = self.term.COLS or 75
        self.bar = term.render(self.BAR)
        self.header = self.term.render(self.HEADER % header.center(self.width))
        self.cleared = 1  # true if we haven't drawn the bar yet
        self.update(0, '')

    def update(self, percent, message):
        """Redraw the bar at fraction `percent` (0.0-1.0) with `message`."""
        if self.cleared:
            sys.stdout.write(self.header)
            self.cleared = 0
        n = int((self.width-10)*percent)
        sys.stdout.write(
            self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
            (self.bar % (100*percent, '='*n, '-'*(self.width-10-n))) +
            self.term.CLEAR_EOL + message.center(self.width))

    def clear(self):
        """Erase the header, bar and message lines if they are drawn."""
        if not self.cleared:
            sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
                             self.term.UP + self.term.CLEAR_EOL +
                             self.term.UP + self.term.CLEAR_EOL)
            self.cleared = 1

View File

@@ -1,508 +0,0 @@
#!/usr/bin/python
## $LicenseInfo:firstyear=2011&license=viewerlgpl$
## Second Life Viewer Source Code
## Copyright (C) 2011, Linden Research, Inc.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation;
## version 2.1 of the License only.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
## $/LicenseInfo$
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
This module works with Python 2.3 or higher."""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
# __date__/__version__ are extracted from RCS keyword strings.
__date__ = '$Date: 2006/06/12 23:15:40 $'.split()[1].replace('/', '-')
__version__ = '$Revision: 1.30 $'.split()[1]

# Human-readable descriptions of the four UUID variant families
# (RFC 4122 section 4.1.1); returned by UUID.variant.
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
    'reserved for NCS compatibility', 'specified in RFC 4122',
    'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'.  The UUID constructor accepts
    four possible forms: a similar string of hexadecimal digits, or a
    string of 16 raw bytes as an argument named 'bytes', or a tuple of
    six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a single
    128-bit integer as an argument named 'int'.

    UUIDs have these read-only attributes:

        bytes       the UUID as a 16-byte string

        fields      a tuple of the six integer fields of the UUID,
                    which are also available as six individual attributes
                    and two derived attributes:

            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID

            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number

        hex         the UUID as a 32-character hexadecimal string

        int         the UUID as a 128-bit integer

        urn         the UUID as a URN as specified in RFC 4122

        variant     the UUID variant (one of the constants RESERVED_NCS,
                    RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)

        version     the UUID version number (1 through 5, meaningful only
                    when the variant is RFC_4122)
    """

    def __init__(self, hex=None, bytes=None, fields=None, int=None,
                 version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:

        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)

        Exactly one of 'hex', 'bytes', 'fields', or 'int' must be given.
        The 'version' argument is optional; if given, the resulting UUID
        will have its variant and version number set according to RFC 4122,
        overriding bits in the given 'hex', 'bytes', 'fields', or 'int'.
        """
        # NOTE: 'hex', 'bytes' and 'int' deliberately shadow builtins;
        # they are part of this class's public keyword interface.
        if [hex, bytes, fields, int].count(None) != 3:
            raise TypeError('need just one of hex, bytes, fields, or int')
        if hex is not None:
            # Strip the optional URN prefix, braces and hyphens.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            # Interpret the 16-byte string big-endian.
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            # Range-check each field against its declared bit width.
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into one 128-bit integer.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Assign via __dict__ directly because __setattr__ is disabled
        # below to keep instances immutable.
        self.__dict__['int'] = int

    def __cmp__(self, other):
        # Python 2 ordering hook: compare by 128-bit integer value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented

    def __hash__(self):
        return hash(self.int)

    def __int__(self):
        return self.int

    def __repr__(self):
        return 'UUID(%r)' % str(self)

    def __setattr__(self, name, value):
        # UUID objects are immutable; reject any attribute assignment.
        raise TypeError('UUID objects are immutable')

    def __str__(self):
        # Canonical 8-4-4-4-12 hyphenated lowercase hex form.
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])

    def get_bytes(self):
        # Serialize the 128-bit integer big-endian into a 16-byte string.
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes

    bytes = property(get_bytes)

    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)

    fields = property(get_fields)

    def get_time_low(self):
        return self.int >> 96L

    time_low = property(get_time_low)

    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff

    time_mid = property(get_time_mid)

    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff

    time_hi_version = property(get_time_hi_version)

    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff

    clock_seq_hi_variant = property(get_clock_seq_hi_variant)

    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff

    clock_seq_low = property(get_clock_seq_low)

    def get_time(self):
        # Reassemble the 60-bit timestamp from its three split fields.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)

    time = property(get_time)

    def get_clock_seq(self):
        # 14-bit clock sequence (variant bits masked off).
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)

    clock_seq = property(get_clock_seq)

    def get_node(self):
        return self.int & 0xffffffffffff

    node = property(get_node)

    def get_hex(self):
        return '%032x' % self.int

    hex = property(get_hex)

    def get_urn(self):
        return 'urn:uuid:' + str(self)

    urn = property(get_urn)

    def get_variant(self):
        # Decode the variant from the most significant bits of
        # clock_seq_hi_variant (RFC 4122 section 4.1.1).
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE

    variant = property(get_variant)

    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs;
        # implicitly returns None for any other variant.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)

    version = property(get_version)
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""
    import os
    # Try ifconfig from PATH first (''), then the common sbin locations.
    for dir in ['', '/sbin/', '/usr/sbin']:
        try:
            path = os.path.join(dir, 'ifconfig')
            if os.path.exists(path):
                pipe = os.popen(path)
            else:
                continue
        except IOError:
            continue
        # Scan the output for a 'hwaddr'/'ether' token; the token that
        # follows it is the colon-separated MAC address.
        for line in pipe:
            words = line.lower().split()
            for i in range(len(words)):
                if words[i] in ['hwaddr', 'ether']:
                    return int(words[i + 1].replace(':', ''), 16)
    # Implicitly returns None when no address was found.
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        # Prefer the real system directory when ctypes can report it.
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        # Look for a value shaped like a dash-separated MAC address
        # ('xx-xx-xx-xx-xx-xx') on the right side of a ':' line.
        for line in pipe:
            value = line.split(':')[-1].strip().lower()
            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                return int(value.replace('-', ''), 16)
    # Implicitly returns None when no address was found.
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    import win32wnet, netbios
    # Enumerate the LAN adapter numbers.
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        # Enumeration failed; implicitly return None.
        return
    adapters._unpack()
    for i in range(adapters.length):
        # Reset the adapter, then request its status block.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # The first six bytes of adapter_address are the MAC address.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.

# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util
    # Shared 16-byte output buffer filled by the native generators below.
    _buffer = ctypes.create_string_buffer(16)

    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time

    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        # Not on Windows; getattr(None, ...) below then yields None.
        lib = None
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    # ctypes unavailable; the subprocess-based getters remain the fallback.
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes."""
    # Generate a time-based UUID into _buffer and extract its node field.
    # Raises TypeError if _uuid_generate_time was never resolved.
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    # 0 is the RPC success status; implicitly returns None on failure.
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None  # Cached result of getnode(); computed at most once.

def getnode():
    """Get the hardware address as a 48-bit integer.  The first time this
    runs, it may launch a separate program, which could be quite slow.  If
    all attempts to obtain the hardware address fail, we choose a random
    48-bit number with its eighth bit set to 1 as recommended in RFC 4122."""
    global _node
    if _node is not None:
        return _node

    import sys
    # Pick platform-appropriate getters; _random_getnode is appended as
    # the always-succeeding last resort.
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]

    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            # A getter may fail for any platform-specific reason; just
            # try the next one.
            continue
        if _node is not None:
            return _node
def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.
    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""
    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    if _uuid_generate_time and node is clock_seq is None:
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)

    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Split the timestamp and clock sequence into the RFC 4122 fields.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    try:
        # Python 2.6
        from hashlib import md5
    except ImportError:
        # Python 2.5 and earlier
        from md5 import new as md5
    # Hash the namespace's raw 16 bytes followed by the name.
    hash = md5(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=3)
def uuid4():
    """Generate a random UUID."""
    # When the system provides a version-4 UUID generator, use it.
    if _uuid_generate_random:
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)

    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        # Fall back when os.urandom is unavailable on this platform.
        # Note: a list of 1-char strings works here because UUID only
        # needs len() and per-element ord() from its 'bytes' argument.
        import random
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    try:
        # Python 2.5+: hashlib (the standalone 'sha' module is
        # deprecated in 2.6 and removed in 3.x); this mirrors the
        # fallback pattern already used by uuid3() above.
        from hashlib import sha1
    except ImportError:
        # Python 2.4 and earlier
        from sha import new as sha1
    # Hash the namespace's raw 16 bytes followed by the name.
    hash = sha1(namespace.bytes + name).digest()
    return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
# They are the well-known namespace IDs from RFC 4122 Appendix C.
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')

File diff suppressed because it is too large Load Diff