diff --git a/testing/src/build_log.py b/testing/src/build_log.py index 6ae156b..9b8019b 100644 --- a/testing/src/build_log.py +++ b/testing/src/build_log.py @@ -19,10 +19,10 @@ class BuildOutputXMLParsing(object): XML parsing utilities for dealing with the Boost Build output XML format. ''' - + def get_child_data( self, root, tag = None, id = None, name = None, strip = False, default = None ): return self.get_data(self.get_child(root,tag=tag,id=id,name=name),strip=strip,default=default) - + def get_data( self, node, strip = False, default = None ): data = None if node: @@ -45,10 +45,10 @@ def get_data( self, node, strip = False, default = None ): if strip: data = data.strip() return data - + def get_child( self, root, tag = None, id = None, name = None, type = None ): return self.get_sibling(root.firstChild,tag=tag,id=id,name=name,type=type) - + def get_sibling( self, sibling, tag = None, id = None, name = None, type = None ): n = sibling while n: @@ -72,7 +72,7 @@ def get_sibling( self, sibling, tag = None, id = None, name = None, type = None return None class BuildOutputProcessor(BuildOutputXMLParsing): - + def __init__(self, inputs): self.test = {} self.target_to_test = {} @@ -81,7 +81,7 @@ def __init__(self, inputs): self.timestamps = [] for input in inputs: self.add_input(input) - + def add_input(self, input): ''' Add a single build XML output file to our data. @@ -101,7 +101,7 @@ def add_input(self, input): (x_f[1])(node) elif event == xml.dom.pulldom.END_ELEMENT: context.pop() - + def x_name_(self, *context, **kwargs): node = None names = [ ] @@ -119,7 +119,7 @@ def x_name_(self, *context, **kwargs): if hasattr(self,name): return (name,getattr(self,name)) return None - + def x_build_test(self, node): ''' Records the initial test information that will eventually @@ -142,7 +142,7 @@ def x_build_test(self, node): # Add a lookup for the test given the test target. self.target_to_test[self.test[test_name]['target']] = test_name return None - + def x_build_targets_target( self, node ): ''' Process the target dependency DAG into an ancestry tree so we can look up @@ -165,7 +165,7 @@ def x_build_targets_target( self, node ): self.parent[child_jam_target] = jam_target dep_node = self.get_sibling(dep_node.nextSibling,tag='dependency') return None - + def x_build_action( self, node ): ''' Given a build action log, process into the corresponding test log and @@ -231,14 +231,14 @@ def x_build_action( self, node ): if action_type == 'result': test['result'] = action['result'] return None - + def x_build_timestamp( self, node ): ''' The time-stamp goes to the corresponding attribute in the result. ''' self.timestamps.append(self.get_data(node).strip()) return None - + def get_test( self, node, type = None ): ''' Find the test corresponding to an action. For testing targets these @@ -269,12 +269,12 @@ def get_test( self, node, type = None ): test = self.test[lib] else: target_name_ = self.target[target]['name'] - if self.target_to_test.has_key(target_name_): + if target_name_ in self.target_to_test: test = self.test[self.target_to_test[target_name_]] else: test = None return (base,test) - + #~ The command executed for the action. For run actions we omit the command #~ as it's just noise. def get_action_command( self, action_node, action_type ): @@ -282,11 +282,11 @@ def get_action_command( self, action_node, action_type ): return self.get_child_data(action_node,tag='command') else: return '' - + #~ The command output. 
def get_action_output( self, action_node, action_type ): return self.get_child_data(action_node,tag='output',default='') - + #~ Some basic info about the action. def get_action_info( self, action_node, action_type ): info = {} @@ -311,17 +311,17 @@ def get_action_info( self, action_node, action_type ): return info class BuildConsoleSummaryReport(object): - + HEADER = '\033[35m\033[1m' INFO = '\033[34m' OK = '\033[32m' WARNING = '\033[33m' FAIL = '\033[31m' ENDC = '\033[0m' - + def __init__(self, bop, opt): self.bop = bop - + def generate(self): self.summary_info = { 'total' : 0, @@ -332,11 +332,11 @@ def generate(self): self.print_test_log() self.print_summary() self.header_print("======================================================================") - + @property def failed(self): return len(self.summary_info['failed']) > 0 - + def print_test_log(self): self.header_print("Tests run..") self.header_print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") @@ -359,7 +359,7 @@ def print_test_log(self): self.fail_print("[FAIL] {0}",k) for action in test['actions']: self.print_action(succeed, action) - + def print_action(self, test_succeed, action): ''' Print the detailed info of failed or always print tests. @@ -376,7 +376,7 @@ def print_action(self, test_succeed, action): p("") for line in output.splitlines(): p("{0}",line.encode('utf-8')) - + def print_summary(self): self.header_print("") self.header_print("Testing summary..") @@ -387,27 +387,27 @@ def print_summary(self): self.fail_print("Failed: {0}",len(self.summary_info['failed'])) for test in self.summary_info['failed']: self.fail_print(" {0}/{1}",test['library'],test['test-name']) - + def p_print(self, format, *args, **kargs): - print format.format(*args,**kargs) - + print(format.format(*args,**kargs)) + def info_print(self, format, *args, **kargs): - print self.INFO+format.format(*args,**kargs)+self.ENDC - + print(self.INFO+format.format(*args,**kargs)+self.ENDC) + def header_print(self, format, *args, **kargs): - print self.HEADER+format.format(*args,**kargs)+self.ENDC - + print(self.HEADER+format.format(*args,**kargs)+self.ENDC) + def ok_print(self, format, *args, **kargs): - print self.OK+format.format(*args,**kargs)+self.ENDC - + print(self.OK+format.format(*args,**kargs)+self.ENDC) + def warn_print(self, format, *args, **kargs): - print self.WARNING+format.format(*args,**kargs)+self.ENDC - + print(self.WARNING+format.format(*args,**kargs)+self.ENDC) + def fail_print(self, format, *args, **kargs): - print self.FAIL+format.format(*args,**kargs)+self.ENDC + print(self.FAIL+format.format(*args,**kargs)+self.ENDC) class Main(object): - + def __init__(self,args=None): op = optparse.OptionParser( usage="%prog [options] input+") diff --git a/testing/src/collect_and_upload_logs.py b/testing/src/collect_and_upload_logs.py index d63b765..a819699 100755 --- a/testing/src/collect_and_upload_logs.py +++ b/testing/src/collect_and_upload_logs.py @@ -3,8 +3,8 @@ # Copyright (c) MetaCommunications, Inc. 2003-2007 # Copyright Rene Rivera 2015 # -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at +# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import xml.sax.saxutils @@ -13,46 +13,55 @@ import time import stat import xml.dom.minidom -import xmlrpclib -import httplib +try: + import xmlrpc.client as xmlrpclib +except ImportError: + import xmlrpclib +try: + import http.client as httplib +except ImportError: + import httplib import os.path import string import sys import re -import urlparse +try: + import urllib.parse as urlparse +except ImportError: + import urlparse import getopt import inspect class utils: - + @staticmethod def log_level(): frames = inspect.stack() level = 0 for i in frames[ 3: ]: - if i[0].f_locals.has_key( '__log__' ): + if '__log__' in i[0].f_locals: level = level + i[0].f_locals[ '__log__' ] return level - + @staticmethod def log( message ): sys.stderr.write( '# ' + ' ' * utils.log_level() + message + '\n' ) sys.stderr.flush() - + @staticmethod def accept_args( args_spec, args, options, usage ): - + defaults_num = len(options) - + ( option_pairs, rest_args ) = getopt.getopt( args, '', args_spec ) map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs ) - - if ( options.has_key( '--help' ) or len( options.keys() ) == defaults_num ): + + if ( '--help' in options or len( options.keys() ) == defaults_num ): usage() sys.exit( 1 ) - + if len( rest_args ) > 0 and rest_args[0][0] == '@': f = open( rest_args[0][1:], 'r' ) config_lines = f.read().splitlines() @@ -65,7 +74,7 @@ def accept_args( args_spec, args, options, usage ): options[ '--%s' % m.group( 'name' ) ] = m.group( 'value' ) else: raise 'Invalid format of config line "%s"' % l - + return rest_args @@ -75,7 +84,13 @@ def chr_or_question_mark( c ): else: return '?' -char_translation_table = string.maketrans( + +if sys.version_info[0] == 3: + maketrans = str.maketrans +else: + maketrans = string.maketrans + +char_translation_table = maketrans( ''.join( map( chr, range(0, 256) ) ) , ''.join( map( chr_or_question_mark, range(0, 256) ) ) ) @@ -83,13 +98,13 @@ def chr_or_question_mark( c ): def process_xml_file( input_file, output_file ): utils.log( 'Processing test log "%s"' % input_file ) - + f = open( input_file, 'r' ) xml = f.readlines() f.close() - + for i in range( 0, len(xml)): - xml[i] = string.translate( xml[i], char_translation_table ) + xml[i] = xml[i].translate( char_translation_table ) output_file.writelines( xml ) @@ -105,7 +120,8 @@ def collect_test_logs( input_dirs, test_results_writer ): utils.log( 'Collecting test logs ...' ) for input_dir in input_dirs: utils.log( 'Walking directory "%s" ...' % input_dir ) - os.path.walk( input_dir, process_test_log_files, test_results_writer ) + for name, dirs, files in os.walk( input_dir ): + process_test_log_files(test_results_writer, name, files) dart_status_from_result = { 'succeed': 'passed', @@ -139,12 +155,12 @@ def __init__(self, proxy): self.proxy = proxy def make_connection(self, host): self.realhost = host - return httplib.HTTP(self.proxy) + return httplib.HTTPConnection(self.proxy) def send_request(self, connection, handler, request_body): connection.putrequest('POST','http://%s%s' % (self.realhost,handler)) def send_host(self, connection, host): connection.putheader('Host',self.realhost) - + def publish_test_logs( input_dirs, @@ -157,7 +173,7 @@ def publish_test_logs( utils.log( 'Publishing test logs ...' 
)
    dart_rpc = None
    dart_dom = {}
-
+
    def _publish_test_log_files_ ( unused, dir, names ):
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
@@ -177,7 +193,7 @@ def _publish_test_log_files_ ( unused, dir, names ):
                    test['test-name'] = 'unknown'
                if not test['toolset'] or test['toolset'] == '':
                    test['toolset'] = 'unknown'
-                if not dart_dom.has_key(test['toolset']):
+                if not test['toolset'] in dart_dom:
                    dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''
@@ -219,10 +235,11 @@ def _publish_test_log_files_ ( unused, dir, names ):
                    })
                submission_dom.documentElement.appendChild(
                    test_dom.documentElement.cloneNode(1) )
-
+
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
-        os.path.walk( input_dir, _publish_test_log_files_, None )
+        for name, dirs, files in os.walk( input_dir ):
+            _publish_test_log_files_( None, name, files)
    if dart_server:
        try:
            rpc_transport = None
@@ -234,24 +251,24 @@ def _publish_test_log_files_ ( unused, dir, names ):
            for dom in dart_dom.values():
                #~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
                dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
-        except Exception, e:
+        except Exception as e:
            utils.log('Dart server error: %s' % e)


def upload_to_ftp( tag, results_file, ftp_proxy, debug_level, ftp_url ):
-
+
    if not ftp_url:
        ftp_host = 'results.boost.org'
        ftp_url = ''.join(['ftp','://anonymous','@',ftp_host,'/boost/do-not-publish-this-url/results/'])
    utils.log( 'Uploading log archive "%s" to %s' % ( results_file, tag ) )
-
+
    ftp_parts = urlparse.urlparse(ftp_url)
    ftp_netloc = re.split('[@]',ftp_parts[1])
    ftp_user = re.split('[:]',ftp_netloc[0])[0]
    ftp_password = re.split('[:]',ftp_netloc[0]+':anonymous')[1]
    ftp_site = re.split('[:]',ftp_netloc[1])[0]
    ftp_path = ftp_parts[2]
-
+
    if not ftp_proxy:
        ftp = ftplib.FTP( ftp_site )
        ftp.set_debuglevel( debug_level )
@@ -285,7 +302,7 @@ def copy_comments( results_xml, comment_file ):
        try:
            results_xml.characters( f.read() )
        finally:
-            f.close()
+            f.close()
    else:
        utils.log( 'Warning: comment file "%s" is not found.' % comment_file )

@@ -316,7 +333,7 @@ def copy_comments( results_xml, comment_file ):
        results_xml.characters( '' )
        results_xml.characters( '' )
        results_xml.characters( '\n' )
-
+
    results_xml.endElement( 'comment' )

@@ -328,7 +345,7 @@ def compress_file( file_path, archive_path ):
        z.write( file_path, os.path.basename( file_path ) )
        z.close()
        utils.log( 'Done writing "%s".'% archive_path )
-    except Exception, msg:
+    except Exception as msg:
        utils.log( 'Warning: Compressing falied (%s)' % msg )
        utils.log( ' Trying to compress using a platform-specific tool...' )
        try: import zip_cmd
@@ -340,7 +357,7 @@ def compress_file( file_path, archive_path ):
            if os.path.exists( archive_path ):
                os.unlink( archive_path )
                utils.log( 'Removing stale "%s".' % archive_path )
-
+
            zip_cmd.main( file_path, archive_path )
            utils.log( 'Done compressing "%s".'
% archive_path ) @@ -355,7 +372,7 @@ def read_timestamp( file ): return time.gmtime( os.stat( file ).st_mtime ) -def collect_logs( +def collect_logs( results_dir , runner_id , tag @@ -370,24 +387,24 @@ def collect_logs( , revision = '' , **unused ): - + timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) ) - + if dart_server: publish_test_logs( [ results_dir ], runner_id, tag, platform, comment_file, timestamp, user, source, run_type, dart_server = dart_server, http_proxy = http_proxy ) - + results_file = os.path.join( results_dir, '%s.xml' % runner_id ) results_writer = open( results_file, 'w' ) utils.log( 'Collecting test logs into "%s"...' % results_file ) - + results_xml = xml.sax.saxutils.XMLGenerator( results_writer ) results_xml.startDocument() - results_xml.startElement( + results_xml.startElement( 'test-run' - , { + , { 'tag': tag , 'platform': platform , 'runner': runner_id @@ -397,7 +414,7 @@ def collect_logs( , 'revision': revision } ) - + copy_comments( results_xml, comment_file ) collect_test_logs( [ results_dir ], results_writer ) @@ -439,7 +456,7 @@ def upload_logs( upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level, ftp_url ) -def collect_and_upload_logs( +def collect_and_upload_logs( results_dir , runner_id , tag @@ -458,8 +475,8 @@ def collect_and_upload_logs( , ftp_url = None , **unused ): - - collect_logs( + + collect_logs( results_dir , runner_id , tag @@ -473,7 +490,7 @@ def collect_and_upload_logs( , dart_server = dart_server , http_proxy = http_proxy ) - + upload_logs( results_dir , runner_id @@ -489,7 +506,7 @@ def collect_and_upload_logs( def accept_args( args ): - args_spec = [ + args_spec = [ 'locate-root=' , 'runner=' , 'tag=' @@ -508,7 +525,7 @@ def accept_args( args ): , 'revision=' , 'ftp=' ] - + options = { '--tag' : 'trunk' , '--platform' : sys.platform @@ -523,11 +540,11 @@ def accept_args( args ): , '--dart-server' : 'beta.boost.org:8081' , '--revision' : None , '--ftp' : None - + } - + utils.accept_args( args_spec, args, options, usage ) - + return { 'results_dir' : options[ '--locate-root' ] , 'runner_id' : options[ '--runner' ] @@ -541,7 +558,7 @@ def accept_args( args ): , 'ftp_proxy' : options[ '--ftp-proxy' ] , 'http_proxy' : options[ '--proxy' ] , 'debug_level' : int(options[ '--debug-level' ]) - , 'send_bjam_log' : options.has_key( '--send-bjam-log' ) + , 'send_bjam_log' : '--send-bjam-log' in options , 'dart_server' : options[ '--dart-server' ] , 'revision' : options[ '--revision' ] , 'ftp' : options[ '--ftp' ] @@ -555,15 +572,15 @@ def accept_args( args ): } def usage(): - print 'Usage: %s [command] [options]' % os.path.basename( sys.argv[0] ) - print ''' + print('Usage: %s [command] [options]' % os.path.basename( sys.argv[0] )) + print( ''' Commands: \t%s Options: \t--locate-root directory to to scan for "test_log.xml" files \t--runner runner ID (e.g. "Metacomm") -\t--timestamp path to a file which modification time will be used +\t--timestamp path to a file which modification time will be used \t as a timestamp of the run ("timestamp" by default) \t--comment an HTML comment file to be inserted in the reports \t ("comment.html" by default) @@ -577,13 +594,13 @@ def usage(): \t--proxy HTTP proxy server address and port (e.g. \t 'http://www.someproxy.com:3128', optional) \t--ftp-proxy FTP proxy server (e.g. 
'ftpproxy', optional) -\t--debug-level debugging level; controls the amount of debugging +\t--debug-level debugging level; controls the amount of debugging \t output printed; 0 by default (no debug output) \t--dart-server The dart server to send results to. \t--ftp The ftp URL to upload results to. ''' % '\n\t'.join( commands.keys() ) +) - def main(): if len(sys.argv) > 1 and sys.argv[1] in commands: command = sys.argv[1] @@ -591,7 +608,7 @@ def main(): else: command = 'collect-and-upload' args = sys.argv[ 1: ] - + commands[ command ]( **accept_args( args ) ) diff --git a/testing/src/process_jam_log.py b/testing/src/process_jam_log.py index af55b3c..a14d26f 100755 --- a/testing/src/process_jam_log.py +++ b/testing/src/process_jam_log.py @@ -64,19 +64,19 @@ def __init__(self,args=None): 'run-type' : run_type, 'revision' : self.revision, } ) - + self.test = {} self.target_to_test = {} self.target = {} self.parent = {} self.log = {} - + self.add_log() self.gen_output() - + #~ print self.test #~ print self.target - + def add_log(self): if self.input[0]: bjam_xml = self.input[0] @@ -110,7 +110,7 @@ def add_log(self): if item: test_run.appendChild(self.results.createTextNode("\n")) test_run.appendChild(item) - + def gen_output(self): if self.output: out = open(self.output,'w') @@ -118,10 +118,10 @@ def gen_output(self): out = sys.stdout if out: self.results.writexml(out,encoding='utf-8') - + def tostring(self): return self.results.toxml('utf-8') - + def x_name_(self, *context, **kwargs): node = None names = [ ] @@ -139,7 +139,7 @@ def x_name_(self, *context, **kwargs): if hasattr(self,name): return (name,getattr(self,name)) return None - + def x(self, *context, **kwargs): node = None names = [ ] @@ -159,13 +159,13 @@ def x(self, *context, **kwargs): else: assert False, 'Unknown node type %s'%(name) return None - + #~ The timestamp goes to the corresponding attribute in the result. def x_build_timestamp( self, node ): test_run = self.results.documentElement test_run.setAttribute('timestamp',self.get_data(node).strip()) return None - + #~ Comment file becomes a comment node. def x_build_comment( self, node ): comment = None @@ -177,7 +177,7 @@ def x_build_comment( self, node ): if not comment: comment = '' return [self.new_text('comment',comment)] - + #~ Tests are remembered for future reference. def x_build_test( self, node ): test_run = self.results.documentElement @@ -195,7 +195,7 @@ def x_build_test( self, node ): self.target_to_test[self.test[test_name]['target']] = test_name #~ print "--- %s\n => %s" %(self.test[test_name]['target'],test_name) return None - + #~ Process the target dependency DAG into an ancestry tree so we can look up #~ which top-level library and test targets specific build actions correspond to. def x_build_targets_target( self, node ): @@ -219,7 +219,7 @@ def x_build_targets_target( self, node ): #~ print "--- %s\n ^ %s" %(jam_target,child_jam_target) dep_node = self.get_sibling(dep_node.nextSibling,tag='dependency') return None - + #~ Given a build action log, process into the corresponding test log and #~ specific test log sub-part. def x_build_action( self, node ): @@ -297,7 +297,7 @@ def x_build_action( self, node ): result_node.appendChild(self.results.createTextNode("\n")) result_node.appendChild(self.results.createTextNode(result_data)) return None - + #~ The command executed for the action. For run actions we omit the command #~ as it's just noise. 
def get_action_command( self, action_node, action_type ): @@ -305,11 +305,11 @@ def get_action_command( self, action_node, action_type ): return self.get_child_data(action_node,tag='command') else: return '' - + #~ The command output. def get_action_output( self, action_node, action_type ): return self.get_child_data(action_node,tag='output',default='') - + #~ Some basic info about the action. def get_action_info( self, action_node, action_type ): info = "" @@ -327,7 +327,7 @@ def get_action_info( self, action_node, action_type ): info += "Define: %s\n" %(self.get_data(define,strip=True)) define = self.get_sibling(define.nextSibling,name='define') return info - + #~ Find the test corresponding to an action. For testing targets these #~ are the ones pre-declared in the --dump-test option. For libraries #~ we create a dummy test as needed. @@ -355,12 +355,12 @@ def get_test( self, node, type = None ): test = self.test[lib] else: target_name_ = self.target[target]['name'] - if self.target_to_test.has_key(target_name_): + if target_name_ in self.target_to_test: test = self.test[self.target_to_test[target_name_]] else: test = None return (base,test) - + #~ Find, or create, the test-log node to add results to. def get_log( self, node, test ): target_directory = os.path.dirname(self.get_child_data( @@ -381,7 +381,7 @@ def get_log( self, node, test ): target_directory=target_directory, show_run_output=show_run_output) return self.log[target_directory] - + #~ The precise toolset from the build properties. def get_toolset( self, node ): toolset = self.get_child_data(self.get_child(node,tag='properties'), @@ -389,9 +389,9 @@ def get_toolset( self, node ): toolset_version = self.get_child_data(self.get_child(node,tag='properties'), name='toolset-%s:version'%toolset,strip=True) return '%s-%s' %(toolset,toolset_version) - + #~ XML utilities... 
- + def get_sibling( self, sibling, tag = None, id = None, name = None, type = None ): n = sibling while n: @@ -413,10 +413,10 @@ def get_sibling( self, sibling, tag = None, id = None, name = None, type = None return n n = n.nextSibling return None - + def get_child( self, root, tag = None, id = None, name = None, type = None ): return self.get_sibling(root.firstChild,tag=tag,id=id,name=name,type=type) - + def get_data( self, node, strip = False, default = None ): data = None if node: @@ -439,10 +439,10 @@ def get_data( self, node, strip = False, default = None ): if strip: data = data.strip() return data - + def get_child_data( self, root, tag = None, id = None, name = None, strip = False, default = None ): return self.get_data(self.get_child(root,tag=tag,id=id,name=name),strip=strip,default=default) - + def new_node( self, tag, *child, **kwargs ): result = self.results.createElement(tag) for k in kwargs.keys(): @@ -457,7 +457,7 @@ def new_node( self, tag, *child, **kwargs ): if c: result.appendChild(c) return result - + def new_text( self, tag, data, **kwargs ): result = self.new_node(tag,**kwargs) data = data.strip() diff --git a/testing/src/regression.py b/testing/src/regression.py index b83225a..afb1e1d 100755 --- a/testing/src/regression.py +++ b/testing/src/regression.py @@ -13,7 +13,6 @@ import os import os.path import platform -import string import sys import time @@ -47,41 +46,41 @@ } class utils: - + @staticmethod def system( commands ): if sys.platform == 'win32': f = open( 'tmp.cmd', 'w' ) - f.write( string.join( commands, '\n' ) ) + f.write( "\n".join( commands ) ) f.close() rc = os.system( 'tmp.cmd' ) return rc else: rc = os.system( '&&'.join( commands ) ) return rc - - + + @staticmethod def checked_system( commands, valid_return_codes = [ 0 ] ): - rc = utils.system( commands ) + rc = utils.system( commands ) if rc not in [ 0 ] + valid_return_codes: raise Exception( 'Command sequence "%s" failed with return code %d' % ( commands, rc ) ) return rc - + @staticmethod def makedirs( path ): if not os.path.exists( path ): os.makedirs( path ) - + @staticmethod def log_level(): frames = inspect.stack() level = 0 for i in frames[ 3: ]: - if i[0].f_locals.has_key( '__log__' ): + if '__log__' in i[0].f_locals: level = level + i[0].f_locals[ '__log__' ] return level - + @staticmethod def log( message ): sys.stderr.write( '# ' + ' ' * utils.log_level() + message + '\n' ) @@ -90,12 +89,12 @@ def log( message ): class runner: def __init__(self,root): - commands = map( + commands = list(map( lambda m: m[8:].replace('_','-'), filter( lambda m: m.startswith('command_'), runner.__dict__.keys()) - ) + )) commands.sort() commands = "commands: %s" % ', '.join(commands) @@ -209,7 +208,7 @@ def __init__(self,root): #~ Initialize option dependent values. self.regression_root = root - + #~ Boost paths. self.boost_root = os.path.join( self.regression_root, 'boost_root' ) self.regression_results = os.path.join( self.regression_root, 'results' ) @@ -217,16 +216,16 @@ def __init__(self,root): self.regression_log = os.path.join( self.regression_results, 'bjam.log' ) else: self.regression_log = os.path.join( self.regression_results, 'bjam.xml' ) - + #~ Boost Build paths. self.tools_bb_root = os.path.join( self.regression_root,'boost_bb' ) self.tools_bb_root = os.path.join( self.tools_bb_root, 'src') self.tools_bjam_root = os.path.join( self.regression_root,'boost_bb', 'src', 'engine' ) - + #~ Regression tools paths. 
self.tools_regression_root = os.path.join( self.regression_root,'boost_regression' ) self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' ) - + self.timestamp_path = os.path.join( self.regression_root, 'timestamp' ) if sys.platform == 'win32': self.patch_boost = 'patch_boost.bat' @@ -444,7 +443,7 @@ def command_collect_logs(self): source = 'tarball' revision = self.git_revision(self.boost_root) - + # Generate expanded comment file that has extra status # information. In particular the revisions of all the git # repos in the test tree. @@ -547,10 +546,9 @@ def command_upload_logs(self): def command_regression(self): import socket - import string try: mail_subject = 'Boost regression for %s on %s' % ( self.tag, - string.split(socket.gethostname(), '.')[0] ) + socket.gethostname().split('.')[0] ) start_time = time.localtime() if self.mail: self.log( 'Sending start notification to "%s"' % self.mail ) @@ -611,8 +609,8 @@ def command_show_revision(self): import re re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' ) - print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 ) - print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 ) + print('\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )) + print('\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )) #~ Utilities... @@ -639,6 +637,8 @@ def log(self,message): def rmtree(self,path): if os.path.exists( path ): import shutil + if sys.version_info[0] == 3: + unicode = str #~ shutil.rmtree( unicode( path ) ) if sys.platform == 'win32': os.system( 'del /f /s /q "%s" >nul 2>&1' % path ) @@ -660,7 +660,7 @@ def retry( self, f, max_attempts=5, sleep_secs=10 ): for attempts in range( max_attempts, -1, -1 ): try: return f() - except Exception, msg: + except Exception as msg: self.log( '%s failed with message "%s"' % ( f.__name__, msg ) ) if attempts == 0: self.log( 'Giving up.' ) @@ -670,13 +670,16 @@ def retry( self, f, max_attempts=5, sleep_secs=10 ): time.sleep( sleep_secs ) def http_get( self, source_url, destination_file ): - import urllib + try: + from urllib.request import urlopen, Request + except ImportError: + from urllib2 import urlopen, Request - proxies = None + req = Request(source_url) if hasattr(self,'proxy') and self.proxy is not None: - proxies = { 'https' : self.proxy } + req.set_proxy('https', self.proxy) - src = urllib.urlopen( source_url, proxies = proxies ) + src = urlopen( req ) f = open( destination_file, 'wb' ) while True: @@ -696,7 +699,7 @@ def build_if_needed( self, tool, toolset ): if toolset is None: if self.toolsets is not None: - toolset = string.split( self.toolsets, ',' )[0] + toolset = self.toolsets.split(',' )[0] else: toolset = tool[ 'default_toolset' ] self.log( 'Warning: No bootstrap toolset for "%s" was specified.' 
% tool[ 'name' ] ) @@ -710,7 +713,7 @@ def build_if_needed( self, tool, toolset ): else: raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) - if not tool.has_key( 'build_path' ): + if not 'build_path' in tool: tool[ 'build_path' ] = self.tool_path( tool ) if not os.path.exists( tool[ 'build_path' ] ): @@ -719,13 +722,19 @@ def build_if_needed( self, tool, toolset ): self.log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) ) def tool_path( self, name_or_spec ): + # Python 2 and 3 compatible + try: + basestring + except NameError: + basestring = str + if isinstance( name_or_spec, basestring ): return os.path.join( self.regression_root, name_or_spec ) if os.path.exists( name_or_spec[ 'path' ] ): return name_or_spec[ 'path' ] - if name_or_spec.has_key( 'build_path' ): + if 'build_path' in name_or_spec: return name_or_spec[ 'build_path' ] build_dir = name_or_spec[ 'build_dir' ] @@ -745,7 +754,7 @@ def bjam_build_cmd( self, *rest ): else: cmd = './build.sh %s' % self.bjam_toolset env_setup_key = 'BJAM_ENVIRONMENT_SETUP' - if os.environ.has_key( env_setup_key ): + if env_setup_key in os.environ: return '%s & %s' % ( os.environ[env_setup_key], cmd ) return cmd @@ -785,7 +794,7 @@ def send_mail( self, subject, msg = '' ): password = None else: server_name = self.smtp_login.split( '@' )[-1] - ( user_name, password ) = string.split( self.smtp_login.split( '@' )[0], ':' ) + ( user_name, password ) = self.smtp_login.split( '@' )[0].split(':') log( ' Sending mail through "%s"...' % server_name ) smtp_server = smtplib.SMTP( server_name ) @@ -805,7 +814,7 @@ def compress_file( self, file_path, archive_path ): z.write( file_path, os.path.basename( file_path ) ) z.close() utils.log( 'Done writing "%s".'% archive_path ) - except Exception, msg: + except Exception as msg: utils.log( 'Warning: Compressing falied (%s)' % msg ) utils.log( ' Trying to compress using a platform-specific tool...' ) try: @@ -875,11 +884,11 @@ def git_source_checkout(self, clean = False): self.git_checkout(git_info['boost'], self.git_branch(), clean) def git_branch(self): - if git_branch.has_key(self.tag): + if self.tag in git_branch: return git_branch[self.tag] else: return self.tag - + def git_revision(self, root): result = '' if self.use_git: @@ -1007,5 +1016,3 @@ def find_boost_dirs( self ): glob.glob( os.path.join( self.regression_root, 'boost[-_]*' ) ) if os.path.isdir( x ) ] - - diff --git a/testing/src/run.py b/testing/src/run.py index 06cfb64..b2ba646 100755 --- a/testing/src/run.py +++ b/testing/src/run.py @@ -10,7 +10,10 @@ import os.path import shutil import sys -import urllib +try: + from urllib.request import FancyURLopener +except ImportError: + from urllib import FancyURLopener #~ Using --skip-script-download is useful to avoid repeated downloading of #~ the regression scripts when doing the regression commands individually. @@ -29,7 +32,7 @@ root = os.path.abspath(os.path.realpath(os.path.curdir)) else: root = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) -print '# Running regressions in %s...' % root +print('# Running regressions in %s...' % root) script_sources = [ 'collect_and_upload_logs.py', 'process_jam_log.py', 'regression.py' ] script_local = root @@ -42,25 +45,25 @@ if not no_update: #~ Bootstrap. #~ * Clear out any old versions of the scripts - print '# Creating regression scripts at %s...' % script_dir + print('# Creating regression scripts at %s...' 
% script_dir) if os.path.exists(script_dir): shutil.rmtree(script_dir) os.mkdir(script_dir) #~ * Get new scripts, either from local working copy, or from remote if use_local and os.path.exists(script_local): - print '# Copying regression scripts from %s...' % script_local + print('# Copying regression scripts from %s...' % script_local) for src in script_sources: shutil.copyfile( os.path.join(script_local,src), os.path.join(script_dir,src) ) else: - print '# Downloading regression scripts from %s...' % script_remote + print('# Downloading regression scripts from %s...' % script_remote) proxy = None for a in sys.argv[1:]: if a.startswith('--proxy='): proxy = {'https' : a.split('=')[1] } - print '--- %s' %(proxy['https']) + print('--- %s' %(proxy['https'])) break for src in script_sources: - urllib.FancyURLopener(proxy).retrieve( + FancyURLopener(proxy).retrieve( '%s/%s' % (script_remote,src), os.path.join(script_dir,src) ) #~ * Make the scripts available to Python