ExportResults.py 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911
  1. import xlsxwriter
  2. import logging
  3. import json
  4. import baangt.base.GlobalConstants as GC
  5. from baangt.base.Timing.Timing import Timing
  6. from baangt.base.Utils import utils
  7. from baangt.base.ExportResults.Append2BaseXLS import Append2BaseXLS
  8. from pathlib import Path
  9. from typing import Optional
  10. from xlsxwriter.worksheet import (
  11. Worksheet, cell_number_tuple, cell_string_tuple)
  12. from sqlalchemy import create_engine
  13. from sqlalchemy.orm import sessionmaker
  14. from baangt.base.DataBaseORM import DATABASE_URL, TestrunLog, TestCaseSequenceLog
  15. from baangt.base.DataBaseORM import TestCaseLog, TestCaseField, GlobalAttribute, TestCaseNetworkInfo
  16. from datetime import datetime
  17. from sqlite3 import IntegrityError
  18. from baangt import plugin_manager
  19. import re
  20. import csv
  21. from dateutil.parser import parse
  22. from uuid import uuid4
  23. from pathlib import Path
  24. from baangt.base.ExportResults.SendStatistics import Statistics
  25. from baangt.base.RuntimeStatistics import Statistic
  26. logger = logging.getLogger("pyC")
class ExportResults:
    """
    Exports the results of a test run:
      * always into the result database (SQLAlchemy ORM),
      * and, depending on GC.EXPORT_FORMAT, into an XLSX workbook
        (Summary/Output/<stage>_JSON/Timing/Network sheets) or a CSV file.
    Finally pushes runtime statistics to the statistics server unless deactivated.
    """
    def __init__(self, **kwargs):
        """
        @param kwargs: expects GC.KWARGS_TESTRUNINSTANCE (the test run object),
            GC.KWARGS_TESTRUNATTRIBUTES, and optionally 'testCasesEndDateTimes_1D',
            'testCasesEndDateTimes_2D' and 'networkInfo' (HAR-style network log).
        """
        self.kwargs = kwargs
        self.testList = []
        self.fileName = None
        self.testRunInstance = kwargs.get(GC.KWARGS_TESTRUNINSTANCE)
        self.testCasesEndDateTimes_1D = kwargs.get('testCasesEndDateTimes_1D')
        self.testCasesEndDateTimes_2D = kwargs.get('testCasesEndDateTimes_2D')
        self.networkInfo = self._get_network_info(kwargs.get('networkInfo'))
        self.testRunName = self.testRunInstance.testRunName
        self.dataRecords = self.testRunInstance.dataRecords
        self.stage = self.__getStageFromDataRecordsOrGlobalSettings()
        self.statistics = Statistics()
        self.statistics.update_data(kwargs)
        self.statistics.update_runtimeStatistic()
        # NOTE(review): assumes handlers[1] of logger "pyC" is the FileHandler — confirm logger setup.
        self.logfile = logger.handlers[1].baseFilename
        try:
            # EXPORT_FORMAT may be either the format itself or a dict carrying it.
            self.exportFormat = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)
            if isinstance(self.exportFormat, dict):
                self.exportFormat = self.exportFormat.get(GC.EXPORT_FORMAT)
            if not self.exportFormat:
                self.exportFormat = GC.EXP_XLSX
        except KeyError:
            self.exportFormat = GC.EXP_XLSX
        # An explicit export file name may be configured on the first test case sequence.
        try:
            if kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME):
                self.fileName = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME)
        except Exception as e:
            # fixme: I don't know, why this error came. When a Filename is set, then the above works.
            # No time now to debug.
            pass
        if not self.fileName:
            self.fileName = self.__getOutputFileName()
        logger.info("Export-Sheet for results: " + self.fileName)
        self.__removeUnwantedFields()  # Will remove Password-Contents AND fields from data records, that came from
        # Globals-File.
        # export results to DB
        self.testcase_uuids = []
        self.exportToDataBase()
        if self.exportFormat == GC.EXP_XLSX:
            self.fieldListExport = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)["Fieldlist"]
            self.workbook = xlsxwriter.Workbook(self.fileName)
            self.summarySheet = self.workbook.add_worksheet("Summary")
            self.worksheet = self.workbook.add_worksheet("Output")
            self.jsonSheet = self.workbook.add_worksheet(f"{self.stage}_JSON")
            self.timingSheet = self.workbook.add_worksheet("Timing")
            self.cellFormatGreen = self.workbook.add_format()
            self.cellFormatGreen.set_bg_color('green')
            self.cellFormatRed = self.workbook.add_format()
            self.cellFormatRed.set_bg_color('red')
            self.cellFormatBold = self.workbook.add_format()
            self.cellFormatBold.set_bold(bold=True)
            self.summaryRow = 0
            self.__setHeaderDetailSheetExcel()
            self.makeSummaryExcel()
            self.exportResultExcel()
            self.exportJsonExcel()
            self.exportAdditionalData()
            self.write_json_sheet()
            self.exportTiming = ExportTiming(self.dataRecords,
                                             self.timingSheet)
            if self.networkInfo:
                self.networkSheet = self.workbook.add_worksheet("Network")
                self.exportNetWork = ExportNetWork(self.networkInfo,
                                                   self.testCasesEndDateTimes_1D,
                                                   self.testCasesEndDateTimes_2D,
                                                   self.workbook,
                                                   self.networkSheet)
            self.closeExcel()
            # Call functionality for potentially exporting data to other sheets/databases
            Append2BaseXLS(self.testRunInstance, self.fileName)
        elif self.exportFormat == GC.EXP_CSV:
            self.export2CSV()
        # "DeactivateStatistics" may arrive as string "True" or boolean True.
        if self.testRunInstance.globalSettings.get("DeactivateStatistics") == "True":
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        elif self.testRunInstance.globalSettings.get("DeactivateStatistics") is True:
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        else:
            try:
                self.statistics.send_statistics()
            except Exception as ex:
                logger.debug(ex)
    def __removeUnwantedFields(self):
        """
        Masks password values and strips redundant columns before export:
          * global settings whose key looks like a password are overwritten with "********",
          * data record fields named PASSWORD/PASSWORT are masked as well,
          * data record fields that also exist in global settings are dropped from the record.
        Skipped entirely when global setting "LetPasswords" is truthy.
        """
        lListPasswordFieldNames = ["PASSWORD", "PASSWORT", "PASSW"]
        if not self.testRunInstance.globalSettings.get("LetPasswords"):
            # If there's a password in GlobalSettings, remove the value:
            for key, value in self.testRunInstance.globalSettings.items():
                if key.upper() in lListPasswordFieldNames:
                    self.testRunInstance.globalSettings[key] = "*" * 8
            # If there's a password in the datafile, remove the value
            # Also remove all columns, that are anyway included in the global settings
            # NOTE(review): data records are checked only against PASSWORD/PASSWORT,
            # not "PASSW" as in lListPasswordFieldNames — confirm whether intentional.
            for key, fields in self.dataRecords.items():
                fieldsToPop = []
                for field, value in fields.items():
                    if field.upper() in ["PASSWORD", "PASSWORT"]:
                        self.dataRecords[key][field] = "*" * 8
                    if field in self.testRunInstance.globalSettings.keys():
                        logger.debug(
                            f"Added {field} to fields to be removed from data record as it exists in GlobalSettings already.")
                        fieldsToPop.append(field)
                # Deferred removal: can't pop while iterating fields.items() above.
                for field in fieldsToPop:
                    logger.debug(f"Removed field {field} from data record.")
                    fields.pop(field)
  130. def exportAdditionalData(self):
  131. # Runs only, when KWARGS-Parameter is set.
  132. if self.kwargs.get(GC.EXPORT_ADDITIONAL_DATA):
  133. addExportData = self.kwargs[GC.EXPORT_ADDITIONAL_DATA]
  134. # Loop over the items. KEY = Tabname, Value = Data to be exported.
  135. # For data KEY = Fieldname, Value = Cell-Value
  136. for key, value in addExportData.items():
  137. lExport = ExportAdditionalDataIntoTab(tabname=key, valueDict=value, outputExcelSheet=self.workbook)
  138. lExport.export()
  139. def __getStageFromDataRecordsOrGlobalSettings(self):
  140. """
  141. If "STAGE" is not provided in the data fields (should actually not happen, but who knows),
  142. we shall take it from GlobalSettings. If also not there, take the default Value GC.EXECUTIN_STAGE_TEST
  143. :return:
  144. """
  145. value = None
  146. for key, value in self.dataRecords.items():
  147. break
  148. if not value.get(GC.EXECUTION_STAGE):
  149. stage = self.testRunInstance.globalSettings.get('TC.Stage', GC.EXECUTION_STAGE_TEST)
  150. else:
  151. stage = value.get(GC.EXECUTION_STAGE)
  152. return stage
  153. def export2CSV(self):
  154. """
  155. Writes CSV-File of datarecords
  156. """
  157. f = open(self.fileName, 'w', encoding='utf-8-sig', newline='')
  158. writer = csv.DictWriter(f, self.dataRecords[0].keys())
  159. writer.writeheader()
  160. for i in range(0, len(self.dataRecords) - 1):
  161. writer.writerow(self.dataRecords[i])
  162. f.close()
    def exportToDataBase(self):
        """
        Persist the complete test run into the result database: the TestrunLog
        header (timings, status counts, RLP-JSON), all global settings, one
        TestCaseLog (+fields) per data record, and optional network info entries.
        Side effect: fills self.testcase_uuids, which the Excel export reuses.
        """
        logger.info(f'Export results to database at: {DATABASE_URL}')
        engine = create_engine(DATABASE_URL)
        # create a Session
        Session = sessionmaker(bind=engine)
        session = Session()
        # get timings of the overall test run
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        # Count record statuses for the status columns of TestrunLog.
        success = 0
        error = 0
        waiting = 0
        for value in self.dataRecords.values():
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                success += 1
            elif value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                error += 1
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING:
                waiting += 1
        self.statistics.update_attribute_with_value("TestCasePassed", success)
        self.statistics.update_attribute_with_value("TestCaseFailed", error)
        self.statistics.update_attribute_with_value("TestCasePaused", waiting)
        self.statistics.update_attribute_with_value("TestCaseExecuted", success + error + waiting)
        # Collect "$(name)"-style keys from the testrun's json_dict into RLPJson.
        try:
            dic = {}
            for key in self.testRunInstance.json_dict:
                if "$(" in key:
                    # strip the "$(" prefix and ")" suffix
                    dic[key[2:-1]] = self.testRunInstance.json_dict[key]
            json_data = json.dumps(dic)
        except Exception as ex:
            logger.info(f"RLP Json error while updating in db : {str(ex)}")
            json_data = ""
        # get documents
        # NOTE(review): 'datafiles' is never used afterwards — dead assignment? confirm.
        datafiles = self.fileName
        # create testrun object
        tr_log = TestrunLog(
            id=self.testRunInstance.uuid.bytes,
            testrunName=self.testRunName,
            logfileName=self.logfile,
            startTime=datetime.strptime(start, "%d-%m-%Y %H:%M:%S"),
            endTime=datetime.strptime(end, "%d-%m-%Y %H:%M:%S"),
            statusOk=success,
            statusFailed=error,
            statusPaused=waiting,
            dataFile=self.fileName,
            RLPJson=json_data,
        )
        # add to DataBase
        session.add(tr_log)
        # set globals: one GlobalAttribute row per setting of this testrun
        for key, value in self.testRunInstance.globalSettings.items():
            globalVar = GlobalAttribute(
                name=key,
                value=str(value),
                testrun=tr_log,
            )
            session.add(globalVar)
        self.__save_commit(session)
        # create testcase sequence instance
        tcs_log = TestCaseSequenceLog(testrun=tr_log)
        # create testcases
        for tc in self.dataRecords.values():
            # get uuid
            uuid = uuid4()
            # create TestCaseLog instances
            tc_log = TestCaseLog(
                id=uuid.bytes,
                testcase_sequence=tcs_log
            )
            # store uuid — index position matches the data record's key in the Excel export
            self.testcase_uuids.append(uuid)
            session.add(tc_log)
            # add TestCase fields
            for key, value in tc.items():
                field = TestCaseField(name=key, value=str(value), testcase=tc_log)
                session.add(field)
        self.__save_commit(session)
        # network info
        if self.networkInfo:
            for entry in self.networkInfo:
                # 'testcase' may also be the string 'unknown' — only store int matches.
                if type(entry.get('testcase')) == type(1):
                    nw_info = TestCaseNetworkInfo(
                        testcase=tcs_log.testcases[entry.get('testcase')-1],
                        browserName=entry.get('browserName'),
                        status=entry.get('status'),
                        method=entry.get('method'),
                        url=entry.get('url'),
                        contentType=entry.get('contentType'),
                        contentSize=entry.get('contentSize'),
                        headers=str(entry.get('headers')),
                        params=str(entry.get('params')),
                        response=entry.get('response'),
                        startDateTime=datetime.strptime(entry.get('startDateTime')[:19], '%Y-%m-%dT%H:%M:%S'),
                        duration=entry.get('duration'),
                    )
                    session.add(nw_info)
            self.__save_commit(session)
    def __save_commit(self, session):
        """
        Commit the session; never raises. Database problems are only logged as
        critical so the file exports can still proceed.
        """
        try:
            session.commit()
        except IntegrityError as e:
            # NOTE(review): this is sqlite3.IntegrityError (see imports); SQLAlchemy
            # usually wraps DB-API errors in sqlalchemy.exc.IntegrityError, which would
            # land in the generic handler below — confirm which one is intended.
            logger.critical(f"Integrity Error during commit to database: {e}")
        except Exception as e:
            logger.critical(f"Unknown error during database commit: {e}")
  271. def _get_test_case_num(self, start_date_time, browser_name):
  272. d_t = parse(start_date_time)
  273. d_t = d_t.replace(tzinfo=None)
  274. if self.testCasesEndDateTimes_1D:
  275. for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
  276. if d_t < dt_end:
  277. return index + 1
  278. elif self.testCasesEndDateTimes_2D:
  279. browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
  280. if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
  281. dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
  282. for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
  283. if d_t < tcAndDtEnd[1]:
  284. return tcAndDtEnd[0] + 1
  285. return 'unknown'
  286. def _get_network_info(self, networkInfoDict):
  287. #
  288. # extracts network info data from the given dict
  289. #
  290. if networkInfoDict:
  291. extractedNetworkInfo = []
  292. for info in networkInfoDict:
  293. #extractedEntry = {}
  294. for entry in info['log']['entries']:
  295. # extract the current entry
  296. extractedNetworkInfo.append({
  297. 'testcase': self._get_test_case_num(entry['startedDateTime'], entry['pageref']),
  298. 'browserName': entry.get('pageref'),
  299. 'status': entry['response'].get('status'),
  300. 'method': entry['request'].get('method'),
  301. 'url': entry['request'].get('url'),
  302. 'contentType': entry['response']['content'].get('mimeType'),
  303. 'contentSize': entry['response']['content'].get('size'),
  304. 'headers': entry['response']['headers'],
  305. 'params': entry['request']['queryString'],
  306. 'response': entry['response']['content'].get('text'),
  307. 'startDateTime': entry['startedDateTime'],
  308. 'duration': entry.get('time'),
  309. })
  310. return extractedNetworkInfo
  311. return None
  312. def exportResultExcel(self, **kwargs):
  313. self._exportData()
  314. def exportJsonExcel(self):
  315. # headers
  316. headers = [
  317. 'Stage',
  318. 'UUID',
  319. 'Attribute',
  320. 'Value',
  321. ]
  322. # header style
  323. header_style = self.workbook.add_format()
  324. header_style.set_bold()
  325. # write header
  326. for index in range(len(headers)):
  327. self.jsonSheet.write(0, index, headers[index], header_style)
  328. # write data
  329. row = 0
  330. for index, testcase in self.dataRecords.items():
  331. # add TestCase fields
  332. for key, value in testcase.items():
  333. row += 1
  334. self.jsonSheet.write(row, 0, self.stage)
  335. self.jsonSheet.write(row, 1, str(self.testcase_uuids[index]))
  336. self.jsonSheet.write(row, 2, key)
  337. self.jsonSheet.write(row, 3, str(value))
  338. # Autowidth
  339. for n in range(len(headers)):
  340. ExcelSheetHelperFunctions.set_column_autowidth(self.jsonSheet, n)
    def write_json_sheet(self):
        # Used to write rlp_ json in individual sheets
        """
        Write each entry of testRunInstance.json_dict into its own worksheet named
        "<stage>_<name>" (a key of form "$(name)" is unwrapped to "name").

        Two payload shapes are supported per key: a list of dicts, or a list of
        lists of dicts. Values that are themselves lists/dicts are dropped from
        the header row (nested data is not flattened).
        """
        dic = self.testRunInstance.json_dict
        for js in dic:
            if not js:
                continue
            elif js[:2] == "$(":
                # unwrap "$(name)" -> "name"
                name = js[2:-1]
            else:
                name = js
            jsonSheet = self.workbook.add_worksheet(f"{self.stage}_{name}")
            if type(dic[js][0]) == dict:  # Condition to get dictionary or dictionary inside list to write headers
                data_dic = dic[js][0]
            elif type(dic[js][0][0]) == dict:
                data_dic = dic[js][0][0]
            else:
                logger.debug(f"{dic[js]} is not json convertible.")
                continue
            remove_header = []
            for key in data_dic:  # Removing headers which consist nested data
                if type(data_dic[key]) == list or type(data_dic[key]) == dict:
                    remove_header.append(key)
            for key in remove_header:
                del data_dic[key]
            headers = []
            for index, header in enumerate(data_dic):
                jsonSheet.write(0, index, header)
                headers.append(header)
            row = 1
            for data in dic[js]:
                if not data:
                    continue
                dt = {}
                for y, dt in enumerate(data):
                    if type(dt) != dict:  # for single dictionary data
                        # 'data' is itself the dict; 'dt' iterates its keys here
                        jsonSheet.write(row, y, data[dt])
                    else:  # if dictionaries are inside list
                        column = 0  # used to update individual column
                        for d in dt:
                            if d not in headers:
                                continue
                            try:
                                jsonSheet.write(row, column, dt[d])
                            except Exception as ex:
                                print(ex)
                            column += 1
                        row += 1
                if type(dt) != dict:
                    # single-dict rows advance the row counter once per record
                    row += 1
    def makeSummaryExcel(self):
        """
        Fill the "Summary" worksheet: record counts per status, logfile name,
        testrun UUID, timing, all global settings and the first test sequence's
        settings. Side effect: appends the same values to self.testList and
        updates the runtime statistics.
        """
        self.summarySheet.write(0, 0, f"Testreport for {self.testRunName}", self.cellFormatBold)
        self.summarySheet.set_column(0, last_col=0, width=15)
        # get testrunname my
        self.testList.append(self.testRunName)
        # Testrecords
        self.__writeSummaryCell("Testrecords", len(self.dataRecords), row=2, format=self.cellFormatBold)
        value = len([x for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS])
        self.testList.append(value)  # Ok my
        if not value:
            # leave the cell blank instead of writing numeric 0
            value = ""
        self.__writeSummaryCell("Successful", value, format=self.cellFormatGreen)
        self.testList.append(value)  # paused my
        self.__writeSummaryCell("Paused", len([x for x in self.dataRecords.values()
                                               if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING]))
        value = len([x["Screenshots"] for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR])
        self.testList.append(value)  # error my
        if not value:
            value = ""
        self.__writeSummaryCell("Error", value, format=self.cellFormatRed)
        # Logfile
        self.__writeSummaryCell("Logfile", logger.handlers[1].baseFilename, row=7)
        # get logfilename for database my
        self.testList.append(logger.handlers[1].baseFilename)
        # database id
        self.__writeSummaryCell("Testrun UUID", str(self.testRunInstance.uuid), row=8)
        # Timing
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        self.statistics.update_attribute_with_value("Duration", duration)
        self.statistics.update_attribute_with_value("TestRunUUID", str(self.testRunInstance.uuid))
        self.__writeSummaryCell("Starttime", start, row=10)
        # get start end during time my
        self.testList.append(start)
        self.testList.append(end)
        self.__writeSummaryCell("Endtime", end)
        self.__writeSummaryCell("Duration", duration, format=self.cellFormatBold)
        self.__writeSummaryCell("Avg. Dur", "")
        # Globals:
        self.__writeSummaryCell("Global settings for this testrun", "", format=self.cellFormatBold, row=15)
        for key, value in self.testRunInstance.globalSettings.items():
            self.__writeSummaryCell(key, str(value))
            # get global data my
            self.testList.append(str(value))
        # Testcase and Testsequence setting
        self.summaryRow += 1
        self.__writeSummaryCell("TestSequence settings follow:", "", format=self.cellFormatBold)
        lSequence = self.testRunInstance.testRunUtils.getSequenceByNumber(testRunName=self.testRunName, sequence="1")
        if lSequence:
            for key, value in lSequence[1].items():
                # nested structures are skipped — only scalar settings are printed
                if isinstance(value, list) or isinstance(value, dict):
                    continue
                self.__writeSummaryCell(key, str(value))
  445. def __writeSummaryCell(self, lineHeader, lineText, row=None, format=None, image=False):
  446. if not row:
  447. self.summaryRow += 1
  448. else:
  449. self.summaryRow = row
  450. if not lineText:
  451. # If we have no lineText we want to apply format to the Header
  452. self.summarySheet.write(self.summaryRow, 0, lineHeader, format)
  453. else:
  454. self.summarySheet.write(self.summaryRow, 0, lineHeader)
  455. self.summarySheet.write(self.summaryRow, 1, lineText, format)
  456. def __getOutputFileName(self):
  457. l_file = Path(self.testRunInstance.managedPaths.getOrSetExportPath())
  458. if self.exportFormat == GC.EXP_XLSX:
  459. lExtension = '.xlsx'
  460. elif self.exportFormat == GC.EXP_CSV:
  461. lExtension = '.csv'
  462. else:
  463. logger.critical(f"wrong export file format: {self.exportFormat}, using 'xlsx' instead")
  464. lExtension = '.xlsx'
  465. l_file = l_file.joinpath("baangt_" + self.testRunName + "_" + utils.datetime_return() + lExtension)
  466. logger.debug(f"Filename for export: {str(l_file)}")
  467. return str(l_file)
  468. def __setHeaderDetailSheetExcel(self):
  469. # the 1st column is DB UUID
  470. self.worksheet.write(0, 0, 'UUID')
  471. # Add fields with name "RESULT_*" to output fields.
  472. i = 1
  473. self.__extendFieldList()
  474. for column in self.fieldListExport:
  475. self.worksheet.write(0, i, column)
  476. i += 1
  477. # add JSON field
  478. self.worksheet.write(0, len(self.fieldListExport)+1, "JSON")
    def __extendFieldList(self):
        """
        Fields, that start with "RESULT_" shall always be exported.
        Other fields, that shall always be exported are also added (Testcaseerrorlog, etc.)
        If global Parameter "TC.ExportAllFields" is set to True ALL fields will be exported
        @return:
        """
        if self.testRunInstance.globalSettings.get("TC.ExportAllFields", False):
            self.fieldListExport = []  # Make an empty list, so that we don't have duplicates
            for key in self.dataRecords[0].keys():
                self.fieldListExport.append(key)
            return
        try:
            # NOTE(review): substring match, not startswith — would also match e.g.
            # "MY_RESULT_X"; docstring says "start with". Confirm intended behavior.
            for key in self.dataRecords[0].keys():
                if "RESULT_" in key.upper():
                    if not key in self.fieldListExport:
                        self.fieldListExport.append(key)
        except Exception as e:
            # dataRecords[0] missing — e.g. no records at all; export continues anyway.
            logger.critical(
                f'looks like we have no data in records: {self.dataRecords}, len of dataRecords: {len(self.dataRecords)}')
        # They are added here, because they'll not necessarily appear in the first record of the export data:
        if not GC.TESTCASEERRORLOG in self.fieldListExport:
            self.fieldListExport.append(GC.TESTCASEERRORLOG)
        if not GC.SCREENSHOTS in self.fieldListExport:
            self.fieldListExport.append(GC.SCREENSHOTS)
        if not GC.EXECUTION_STAGE in self.fieldListExport:
            self.fieldListExport.append(GC.EXECUTION_STAGE)
  506. def _exportData(self):
  507. for key, value in self.dataRecords.items():
  508. # write DB UUID
  509. try:
  510. self.worksheet.write(key + 1, 0, str(self.testcase_uuids[key]))
  511. # write RESULT fields
  512. for (n, column) in enumerate(self.fieldListExport):
  513. self.__writeCell(key + 1, n + 1, value, column)
  514. # Also write everything as JSON-String into the last column
  515. self.worksheet.write(key + 1, len(self.fieldListExport) + 1, json.dumps(value))
  516. except IndexError as e:
  517. logger.error(f"List of testcase_uuids didn't have a value for {key}. That shouldn't happen!")
  518. except BaseException as e:
  519. logger.error(f"Error happened where it shouldn't. Error was {e}")
  520. # Create autofilter
  521. self.worksheet.autofilter(0, 0, len(self.dataRecords.items()), len(self.fieldListExport))
  522. # Make cells wide enough
  523. for n in range(0, len(self.fieldListExport)):
  524. ExcelSheetHelperFunctions.set_column_autowidth(self.worksheet, n)
    def __writeCell(self, line, cellNumber, testRecordDict, fieldName, strip=False):
        """
        Write one cell of the "Output" sheet for the given field of a record.

        NOTE: normalizes the value in-place in testRecordDict before writing
        (bool -> "True"/"False", strings with a leading newline are stripped).
        Status cells get green/red background; screenshot fields are embedded
        as images instead of text.
        """
        if fieldName in testRecordDict.keys() and testRecordDict[fieldName]:
            # Convert boolean for Output
            if isinstance(testRecordDict[fieldName], bool):
                testRecordDict[fieldName] = "True" if testRecordDict[fieldName] else "False"
            # Remove leading New-Line:
            if isinstance(testRecordDict[fieldName], str):
                if '\n' in testRecordDict[fieldName][0:5] or strip:
                    testRecordDict[fieldName] = testRecordDict[fieldName].strip()
            # Do different stuff for Dicts and Lists:
            if isinstance(testRecordDict[fieldName], dict):
                self.worksheet.write(line, cellNumber, testRecordDict[fieldName])
            elif isinstance(testRecordDict[fieldName], list):
                if fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber,
                                         utils.listToString(testRecordDict[fieldName]))
            else:
                if fieldName == GC.TESTCASESTATUS:
                    # color the status cell; statuses other than success/error write nothing
                    if testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatGreen)
                    elif testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatRed)
                elif fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]))
  553. def __attachScreenshotsToExcelCells(self, cellNumber, fieldName, line, testRecordDict):
  554. # Place the screenshot images "on" the appropriate cell
  555. try:
  556. if type(testRecordDict[fieldName]) == list:
  557. if Path(testRecordDict[fieldName][-1]).is_file():
  558. self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName][-1], {'x_scale': 0.05,
  559. 'y_scale': 0.05})
  560. else:
  561. logger.error(f"Sceenshot file {testRecordDict[fieldName][-1]} can't be found")
  562. for nextScreenshotNumber in range(len(testRecordDict[fieldName]) - 1):
  563. if Path(testRecordDict[fieldName][nextScreenshotNumber]).is_file():
  564. self.worksheet.insert_image(line, len(self.fieldListExport) + nextScreenshotNumber + 1,
  565. testRecordDict[fieldName][nextScreenshotNumber],
  566. {'x_scale': 0.05, 'y_scale': 0.05})
  567. else:
  568. logger.error(f"Screenshot file {testRecordDict[fieldName][nextScreenshotNumber]} can't be found")
  569. else:
  570. if Path(testRecordDict[fieldName]).is_file():
  571. self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName], {'x_scale': 0.05,
  572. 'y_scale': 0.05})
  573. else:
  574. logger.error(f"Screenshot file {testRecordDict[fieldName]} can't be found")
  575. except Exception as e:
  576. logger.error(f"Problem with screenshots - can't attach them {e}")
  577. self.worksheet.set_row(line, 35)
  578. def closeExcel(self):
  579. self.workbook.close()
  580. # Next line doesn't work on MAC. Returns "not authorized"
  581. # subprocess.Popen([self.filename], shell=True)
  582. class ExportAdditionalDataIntoTab:
  583. def __init__(self, tabname, valueDict, outputExcelSheet: xlsxwriter.Workbook):
  584. self.tab = outputExcelSheet.add_worksheet(tabname)
  585. self.values = valueDict
  586. def export(self):
  587. self.makeHeader()
  588. self.writeLines()
  589. def makeHeader(self):
  590. for cellNumber, entries in self.values.items():
  591. for column, (key, value) in enumerate(entries.items()):
  592. self.tab.write(0, column, key)
  593. break # Write header only for first line.
  594. def writeLines(self):
  595. currentLine = 1
  596. for line, values in self.values.items():
  597. for column, (key, value) in enumerate(values.items()):
  598. self.tab.write(currentLine, column, value)
  599. currentLine += 1
class ExcelSheetHelperFunctions:
    """Static helpers for xlsxwriter worksheets (column auto-width computation)."""
    def __init__(self):
        pass

    @staticmethod
    def set_column_autowidth(worksheet: Worksheet, column: int):
        """
        Set the width automatically on a column in the `Worksheet`.
        !!! Make sure you run this function AFTER having all cells filled in
        the worksheet!
        """
        maxwidth = ExcelSheetHelperFunctions.get_column_width(worksheet=worksheet, column=column)
        if maxwidth is None:
            return
        elif maxwidth > 45:
            # cap the width so very long cell contents don't blow up the layout
            maxwidth = 45
        worksheet.set_column(first_col=column, last_col=column, width=maxwidth)

    @staticmethod
    def get_column_width(worksheet: Worksheet, column: int) -> Optional[int]:
        """Get the max column width in a `Worksheet` column."""
        # NOTE(review): relies on xlsxwriter internals (str_table, worksheet.table,
        # cell tuple types) — may break on library upgrades; confirm pinned version.
        strings = getattr(worksheet, '_ts_all_strings', None)
        if strings is None:
            # Cache the shared-string table (sorted by index) on the worksheet object.
            strings = worksheet._ts_all_strings = sorted(
                worksheet.str_table.string_table,
                key=worksheet.str_table.string_table.__getitem__)
        lengths = set()
        for row_id, colums_dict in worksheet.table.items():  # type: int, dict
            data = colums_dict.get(column)
            if not data:
                continue
            if type(data) is cell_string_tuple:
                iter_length = len(strings[data.string])
                if not iter_length:
                    continue
                lengths.add(iter_length)
                continue
            if type(data) is cell_number_tuple:
                iter_length = len(str(data.number))
                if not iter_length:
                    continue
                lengths.add(iter_length)
        if not lengths:
            return None
        return max(lengths)
  643. class ExportNetWork:
  644. headers = ['BrowserName', 'TestCaseNum', 'Status', 'Method', 'URL', 'ContentType', 'ContentSize', 'Headers',
  645. 'Params', 'Response', 'startDateTime', 'Duration/ms']
  646. def __init__(self, networkInfo: dict, testCasesEndDateTimes_1D: list,
  647. testCasesEndDateTimes_2D: list, workbook: xlsxwriter.Workbook, sheet: xlsxwriter.worksheet):
  648. self.networkInfo = networkInfo
  649. #self.testCasesEndDateTimes_1D = testCasesEndDateTimes_1D
  650. #self.testCasesEndDateTimes_2D = testCasesEndDateTimes_2D
  651. self.workbook = workbook
  652. self.sheet = sheet
  653. header_style = self.get_header_style()
  654. self.write_header(style=header_style)
  655. self.set_column_align()
  656. self.write_content()
  657. self.set_column_width()
  658. def set_column_align(self):
  659. right_align_indexes = list()
  660. right_align_indexes.append(ExportNetWork.headers.index('ContentSize'))
  661. right_align_indexes.append(ExportNetWork.headers.index('Duration/ms'))
  662. right_align_style = self.get_column_style(alignment='right')
  663. left_align_style = self.get_column_style(alignment='left')
  664. [self.sheet.set_column(i, i, cell_format=right_align_style) if i in right_align_indexes else
  665. self.sheet.set_column(i, i, cell_format=left_align_style) for i in range(len(ExportNetWork.headers))]
  666. def set_column_width(self):
  667. [ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, i) for i in range(len(ExportNetWork.headers))]
  668. def get_header_style(self):
  669. header_style = self.workbook.add_format()
  670. header_style.set_bg_color("#00CCFF")
  671. header_style.set_color("#FFFFFF")
  672. header_style.set_bold()
  673. header_style.set_border()
  674. return header_style
  675. def get_column_style(self, alignment=None):
  676. column_style = self.workbook.add_format()
  677. column_style.set_color("black")
  678. column_style.set_align('right') if alignment == 'right' \
  679. else column_style.set_align('left') if alignment == 'left' else None
  680. column_style.set_border()
  681. return column_style
  682. def write_header(self, style=None):
  683. for index, value in enumerate(ExportNetWork.headers):
  684. self.sheet.write(0, index, value, style)
  685. def _get_test_case_num(self, start_date_time, browser_name):
  686. d_t = parse(start_date_time)
  687. d_t = d_t.replace(tzinfo=None)
  688. if self.testCasesEndDateTimes_1D:
  689. for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
  690. if d_t < dt_end:
  691. return index + 1
  692. elif self.testCasesEndDateTimes_2D:
  693. browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
  694. if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
  695. dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
  696. for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
  697. if d_t < tcAndDtEnd[1]:
  698. return tcAndDtEnd[0] + 1
  699. return 'unknown'
  700. def write_content(self):
  701. if not self.networkInfo:
  702. return
  703. #partition_index = 0
  704. for index in range(len(self.networkInfo)):
  705. data_list = [
  706. self.networkInfo[index]['browserName'],
  707. self.networkInfo[index]['testcase'],
  708. self.networkInfo[index]['status'],
  709. self.networkInfo[index]['method'],
  710. self.networkInfo[index]['url'],
  711. self.networkInfo[index]['contentType'],
  712. self.networkInfo[index]['contentSize'],
  713. self.networkInfo[index]['headers'],
  714. self.networkInfo[index]['params'],
  715. self.networkInfo[index]['response'],
  716. self.networkInfo[index]['startDateTime'],
  717. self.networkInfo[index]['duration'],
  718. ]
  719. for i in range(len(data_list)):
  720. self.sheet.write(index + 1, i, str(data_list[i]) or 'null')
  721. class ExportTiming:
  722. def __init__(self, testdataRecords: dict, sheet: xlsxwriter.worksheet):
  723. self.testdataRecords = testdataRecords
  724. self.sheet: xlsxwriter.worksheet = sheet
  725. self.sections = {}
  726. self.findAllTimingSections()
  727. self.writeHeader()
  728. self.writeLines()
  729. # Autowidth
  730. for n in range(0, len(self.sections) + 1):
  731. ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, n)
  732. def writeHeader(self):
  733. self.wc(0, 0, "Testcase#")
  734. for index, key in enumerate(self.sections.keys(), start=1):
  735. self.wc(0, index, key)
  736. def writeLines(self):
  737. for tcNumber, (key, line) in enumerate(self.testdataRecords.items(), start=1):
  738. self.wc(tcNumber, 0, tcNumber)
  739. lSections = self.interpretTimeLog(line[GC.TIMELOG])
  740. for section, timingValue in lSections.items():
  741. # find, in which column this section should be written:
  742. for column, key in enumerate(self.sections.keys(), 1):
  743. if key == section:
  744. self.wc(tcNumber, column,
  745. timingValue[GC.TIMING_DURATION])
  746. continue
  747. @staticmethod
  748. def shortenTimingValue(timingValue):
  749. # TimingValue is seconds in Float. 2 decimals is enough:
  750. timingValue = int(float(timingValue) * 100)
  751. return timingValue / 100
  752. def writeCell(self, row, col, content, format=None):
  753. self.sheet.write(row, col, content, format)
  754. wc = writeCell
  755. def findAllTimingSections(self):
  756. """
  757. We try to have an ordered list of Timing Sequences. As each Testcase might have different sections we'll have
  758. to make guesses
  759. @return:
  760. """
  761. lSections = {}
  762. for key, line in self.testdataRecords.items():
  763. lTiming: dict = ExportTiming.interpretTimeLog(line[GC.TIMELOG])
  764. for key in lTiming.keys():
  765. if lSections.get(key):
  766. continue
  767. else:
  768. lSections[key] = None
  769. self.sections = lSections
  770. @staticmethod
  771. def interpretTimeLog(lTimeLog):
  772. """Example Time Log:
  773. Complete Testrun: Start: 1579553837.241974 - no end recorded
  774. TestCaseSequenceMaster: Start: 1579553837.243414 - no end recorded
  775. CustTestCaseMaster: Start: 1579553838.97329 - no end recorded
  776. Browser Start: , since last call: 2.3161418437957764
  777. Empfehlungen: , since last call: 6.440968036651611, ZIDs:[175aeac023237a73], TS:2020-01-20 21:57:46.525577
  778. Annahme_RABAZ: , since last call: 2.002716064453125e-05, ZIDs:[6be7d0a44e59acf6], TS:2020-01-20 21:58:37.203583
  779. Antrag drucken: , since last call: 9.075241088867188, ZIDs:[6be7d0a44e59acf6, b27c3875ddcbb4fa], TS:2020-01-20 21:58:38.040137
  780. Warten auf Senden an Bestand Button: , since last call: 1.3927149772644043
  781. Senden an Bestand: , since last call: 9.60469913482666, ZIDs:[66b12fa4869cf8a0, ad1f3d47c4694e26], TS:2020-01-20 21:58:49.472288
  782. where the first part before ":" is the section, "since last call:" is the duration, TS: is the timestamp
  783. Update 29.3.2020: Format changed to "since last call: 00:xx:xx,", rest looks identical.
  784. """
  785. lExport = {}
  786. lLines = lTimeLog.split("\n")
  787. for line in lLines:
  788. parts = line.split(",")
  789. if len(parts) < 2:
  790. continue
  791. if "Start:" in line:
  792. # Format <sequence>: <Start>: <time.loctime>
  793. continue
  794. else:
  795. lSection = parts[0].replace(":", "").strip()
  796. lDuration = parts[1].split("since last call: ")[1]
  797. lExport[lSection] = {GC.TIMING_DURATION: lDuration}
  798. return lExport