# ExportResults.py

import xlsxwriter
import logging
import json
import baangt.base.GlobalConstants as GC
from baangt.base.Timing.Timing import Timing
from baangt.base.Utils import utils
from baangt.base.ExportResults.Append2BaseXLS import Append2BaseXLS
from pathlib import Path
from typing import Optional
from xlsxwriter.worksheet import (
    Worksheet, cell_number_tuple, cell_string_tuple)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from baangt.base.DataBaseORM import DATABASE_URL, TestrunLog, TestCaseSequenceLog
from baangt.base.DataBaseORM import TestCaseLog, TestCaseField, GlobalAttribute, TestCaseNetworkInfo
from datetime import datetime
from sqlite3 import IntegrityError
from baangt import plugin_manager
import re
import csv
from dateutil.parser import parse
from uuid import uuid4
from baangt.base.ExportResults.SendStatistics import Statistics
from baangt.base.RuntimeStatistics import Statistic
from openpyxl import load_workbook

logger = logging.getLogger("pyC")


class ExportResults:
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.testList = []
        self.fileName = None
        self.testRunInstance = kwargs.get(GC.KWARGS_TESTRUNINSTANCE)
        self.testCasesEndDateTimes_1D = kwargs.get('testCasesEndDateTimes_1D')
        self.testCasesEndDateTimes_2D = kwargs.get('testCasesEndDateTimes_2D')
        self.networkInfo = self._get_network_info(kwargs.get('networkInfo'))
        self.testRunName = self.testRunInstance.testRunName
        self.dataRecords = self.testRunInstance.dataRecords
        self.stage = self.__getStageFromDataRecordsOrGlobalSettings()
        self.statistics = Statistics()
        self.statistics.update_data(kwargs)
        self.statistics.update_runtimeStatistic()
        self.logfile = logger.handlers[1].baseFilename

        try:
            self.exportFormat = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)
            if isinstance(self.exportFormat, dict):
                self.exportFormat = self.exportFormat.get(GC.EXPORT_FORMAT)
            if not self.exportFormat:
                self.exportFormat = GC.EXP_XLSX
        except KeyError:
            self.exportFormat = GC.EXP_XLSX

        try:
            if kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME):
                self.fileName = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(
                    GC.EXPORT_FILENAME)
        except Exception as e:
            # FIXME: Root cause unclear. When a filename is set, the lookup above works. Needs debugging.
            pass

        if not self.fileName:
            self.fileName = self.__getOutputFileName()
        logger.info("Export-Sheet for results: " + self.fileName)

        # Remove password contents and fields from the data records that came from the globals file.
        self.__removeUnwantedFields()

        # export results to DB
        self.testcase_uuids = []
        self.exportToDataBase()

        if self.exportFormat == GC.EXP_XLSX:
            self.fieldListExport = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)["Fieldlist"]
            self.workbook = xlsxwriter.Workbook(self.fileName)
            self.summarySheet = self.workbook.add_worksheet("Summary")
            self.worksheet = self.workbook.add_worksheet("Output")
            self.jsonSheet = self.workbook.add_worksheet(f"{self.stage}_JSON")
            self.timingSheet = self.workbook.add_worksheet("Timing")
            self.cellFormatGreen = self.workbook.add_format()
            self.cellFormatGreen.set_bg_color('green')
            self.cellFormatRed = self.workbook.add_format()
            self.cellFormatRed.set_bg_color('red')
            self.cellFormatBold = self.workbook.add_format()
            self.cellFormatBold.set_bold(bold=True)
            self.summaryRow = 0
            self.__setHeaderDetailSheetExcel()
            self.makeSummaryExcel()
            self.exportResultExcel()
            self.exportJsonExcel()
            self.exportAdditionalData()
            self.write_json_sheet()
            self.exportTiming = ExportTiming(self.dataRecords,
                                             self.timingSheet)
            if self.networkInfo:
                self.networkSheet = self.workbook.add_worksheet("Network")
                self.exportNetWork = ExportNetWork(self.networkInfo,
                                                   self.testCasesEndDateTimes_1D,
                                                   self.testCasesEndDateTimes_2D,
                                                   self.workbook,
                                                   self.networkSheet)
            self.closeExcel()
            # Call functionality for potentially exporting data to other sheets/databases
            Append2BaseXLS(self.testRunInstance, self.fileName)
        elif self.exportFormat == GC.EXP_CSV:
            self.export2CSV()

        if self.testRunInstance.globalSettings.get("DeactivateStatistics") == "True":
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        elif self.testRunInstance.globalSettings.get("DeactivateStatistics") is True:
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        else:
            try:
                self.statistics.send_statistics()
            except Exception as ex:
                logger.debug(ex)

        if not self.testRunInstance.noCloneXls:
            self.update_result_in_testrun()

    def __removeUnwantedFields(self):
        lListPasswordFieldNames = ["PASSWORD", "PASSWORT", "PASSW"]
        if not self.testRunInstance.globalSettings.get("LetPasswords"):
            # If there's a password in GlobalSettings, mask the value:
            for key, value in self.testRunInstance.globalSettings.items():
                if key.upper() in lListPasswordFieldNames:
                    self.testRunInstance.globalSettings[key] = "*" * 8

            # If there's a password in the data file, mask the value.
            # Also remove all columns that are already included in the global settings.
            for key, fields in self.dataRecords.items():
                fieldsToPop = []
                for field, value in fields.items():
                    if field.upper() in lListPasswordFieldNames:
                        self.dataRecords[key][field] = "*" * 8
                    if field in self.testRunInstance.globalSettings.keys():
                        fieldsToPop.append(field)
                for field in fieldsToPop:
                    if field != 'Screenshots' and field != 'Stage':  # Stage and Screenshots are needed in the output file
                        fields.pop(field)

    def exportAdditionalData(self):
        # Runs only when the KWARGS parameter is set.
        if self.kwargs.get(GC.EXPORT_ADDITIONAL_DATA):
            addExportData = self.kwargs[GC.EXPORT_ADDITIONAL_DATA]
            # Loop over the items. Key = tab name, value = data to be exported.
            # Within the data: key = field name, value = cell value.
            for key, value in addExportData.items():
                lExport = ExportAdditionalDataIntoTab(tabname=key, valueDict=value, outputExcelSheet=self.workbook)
                lExport.export()
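        # Expected shape of EXPORT_ADDITIONAL_DATA (illustrative only; tab and field names are hypothetical):
        #   {"MyExtraTab": {0: {"Field1": "a", "Field2": "b"},
        #                   1: {"Field1": "c", "Field2": "d"}}}
        # This would create a worksheet "MyExtraTab" with the headers Field1/Field2 and two data rows.
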
    def __getStageFromDataRecordsOrGlobalSettings(self):
        """
        If "STAGE" is not provided in the data fields (which should not happen, but who knows),
        we take it from GlobalSettings. If it is not set there either, the default GC.EXECUTION_STAGE_TEST is used.
        :return:
        """
        value = None
        for key, value in self.dataRecords.items():
            break
        if not value.get(GC.EXECUTION_STAGE):
            stage = self.testRunInstance.globalSettings.get('TC.Stage', GC.EXECUTION_STAGE_TEST)
        else:
            stage = value.get(GC.EXECUTION_STAGE)
        return stage

    def export2CSV(self):
        """
        Writes a CSV file of the data records.
        """
        f = open(self.fileName, 'w', encoding='utf-8-sig', newline='')
        writer = csv.DictWriter(f, self.dataRecords[0].keys())
        writer.writeheader()
        for i in range(0, len(self.dataRecords)):  # write every record, including the last one
            writer.writerow(self.dataRecords[i])
        f.close()

    def exportToDataBase(self):
        #
        # Writes the results to the database.
        #
        logger.info(f'Export results to database at: {DATABASE_URL}')
        engine = create_engine(DATABASE_URL)
        # create a Session
        Session = sessionmaker(bind=engine)
        session = Session()
        # get timings
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        # get status counts
        success = 0
        error = 0
        waiting = 0
        for value in self.dataRecords.values():
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                success += 1
            elif value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                error += 1
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING:
                waiting += 1
        self.statistics.update_attribute_with_value("TestCasePassed", success)
        self.statistics.update_attribute_with_value("TestCaseFailed", error)
        self.statistics.update_attribute_with_value("TestCasePaused", waiting)
        self.statistics.update_attribute_with_value("TestCaseExecuted", success + error + waiting)
        try:
            dic = {}
            for key in self.testRunInstance.json_dict:
                if "$(" in key:
                    dic[key[2:-1]] = self.testRunInstance.json_dict[key]
            json_data = json.dumps(dic)
        except Exception as ex:
            logger.info(f"RLP JSON error while updating in DB: {str(ex)}")
            json_data = ""
        # get documents
        datafiles = self.fileName
        # create the testrun object
        tr_log = TestrunLog(
            id=self.testRunInstance.uuid.bytes,
            testrunName=self.testRunName,
            logfileName=self.logfile,
            startTime=datetime.strptime(start, "%d-%m-%Y %H:%M:%S"),
            endTime=datetime.strptime(end, "%d-%m-%Y %H:%M:%S"),
            statusOk=success,
            statusFailed=error,
            statusPaused=waiting,
            dataFile=self.fileName,
            RLPJson=json_data,
        )
        # add it to the database
        session.add(tr_log)
        # set globals
        for key, value in self.testRunInstance.globalSettings.items():
            globalVar = GlobalAttribute(
                name=key,
                value=str(value),
                testrun=tr_log,
            )
            session.add(globalVar)
        self.__save_commit(session)
        # create the testcase sequence instance
        tcs_log = TestCaseSequenceLog(testrun=tr_log, number=1)
        # create testcases
        for tc_number, tc in enumerate(self.dataRecords.values(), 1):
            # get uuid
            uuid = uuid4()
            # create TestCaseLog instances
            tc_log = TestCaseLog(
                id=uuid.bytes,
                testcase_sequence=tcs_log,
                number=tc_number,
            )
            # store uuid
            self.testcase_uuids.append(uuid)
            session.add(tc_log)
            # add TestCase fields
            for key, value in tc.items():
                field = TestCaseField(name=key, value=str(value), testcase=tc_log)
                session.add(field)
        self.__save_commit(session)
        # network info
        if self.networkInfo:
            for entry in self.networkInfo:
                if type(entry.get('testcase')) == type(1):
                    nw_info = TestCaseNetworkInfo(
                        testcase=tcs_log.testcases[entry.get('testcase') - 1],
                        browserName=entry.get('browserName'),
                        status=entry.get('status'),
                        method=entry.get('method'),
                        url=entry.get('url'),
                        contentType=entry.get('contentType'),
                        contentSize=entry.get('contentSize'),
                        headers=str(entry.get('headers')),
                        params=str(entry.get('params')),
                        response=entry.get('response'),
                        startDateTime=datetime.strptime(entry.get('startDateTime')[:19], '%Y-%m-%dT%H:%M:%S'),
                        duration=entry.get('duration'),
                    )
                    session.add(nw_info)
            self.__save_commit(session)

    def __save_commit(self, session):
        try:
            session.commit()
        except IntegrityError as e:
            logger.critical(f"Integrity Error during commit to database: {e}")
        except Exception as e:
            logger.critical(f"Unknown error during database commit: {e}")

    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'

    def _get_network_info(self, networkInfoDict):
        #
        # Extracts network info data from the given dict.
        #
        if networkInfoDict:
            extractedNetworkInfo = []
            for info in networkInfoDict:
                # extractedEntry = {}
                for entry in info['log']['entries']:
                    # extract the current entry
                    extractedNetworkInfo.append({
                        'testcase': self._get_test_case_num(entry['startedDateTime'], entry['pageref']),
                        'browserName': entry.get('pageref'),
                        'status': entry['response'].get('status'),
                        'method': entry['request'].get('method'),
                        'url': entry['request'].get('url'),
                        'contentType': entry['response']['content'].get('mimeType'),
                        'contentSize': entry['response']['content'].get('size'),
                        'headers': entry['response']['headers'],
                        'params': entry['request']['queryString'],
                        'response': entry['response']['content'].get('text'),
                        'startDateTime': entry['startedDateTime'],
                        'duration': entry.get('time'),
                    })
            return extractedNetworkInfo
        return None

    def exportResultExcel(self, **kwargs):
        self._exportData()

    def exportJsonExcel(self):
        # headers
        headers = [
            'Stage',
            'UUID',
            'Attribute',
            'Value',
        ]
        # header style
        header_style = self.workbook.add_format()
        header_style.set_bold()
        # write header
        for index in range(len(headers)):
            self.jsonSheet.write(0, index, headers[index], header_style)
        # write data
        row = 0
        for index, testcase in self.dataRecords.items():
            # add TestCase fields
            for key, value in testcase.items():
                row += 1
                self.jsonSheet.write(row, 0, self.stage)
                self.jsonSheet.write(row, 1, str(self.testcase_uuids[index]))
                self.jsonSheet.write(row, 2, key)
                self.jsonSheet.write(row, 3, str(value))
        # Autowidth
        for n in range(len(headers)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.jsonSheet, n)

    def write_json_sheet(self):
        # Writes the RLP JSON data into individual sheets.
        dic = self.testRunInstance.json_dict
        for js in dic:
            if not js:
                continue
            elif js[:2] == "$(":
                name = js[2:-1]
            else:
                name = js
            jsonSheet = self.workbook.add_worksheet(f"{self.stage}_{name}")
            if type(dic[js][0]) == dict:  # the data is a dict, or a dict inside a list; needed to write the headers
                data_dic = dic[js][0]
            elif type(dic[js][0][0]) == dict:
                data_dic = dic[js][0][0]
            else:
                logger.debug(f"{dic[js]} is not json convertible.")
                continue
            remove_header = []
            for key in data_dic:  # remove headers that contain nested data
                if type(data_dic[key]) == list or type(data_dic[key]) == dict:
                    remove_header.append(key)
            for key in remove_header:
                del data_dic[key]
            headers = []
            for index, header in enumerate(data_dic):
                jsonSheet.write(0, index, header)
                headers.append(header)
            row = 1
            for data in dic[js]:
                if not data:
                    continue
                dt = {}
                for y, dt in enumerate(data):
                    if type(dt) != dict:  # single dictionary data
                        jsonSheet.write(row, y, data[dt])
                    else:  # dictionaries inside a list
                        column = 0  # used to advance the individual column
                        for d in dt:
                            if d not in headers:
                                continue
                            try:
                                jsonSheet.write(row, column, dt[d])
                            except Exception as ex:
                                print(ex)
                            column += 1
                        row += 1
                if type(dt) != dict:
                    row += 1

    def makeSummaryExcel(self):
        self.summarySheet.write(0, 0, f"Testreport for {self.testRunName}", self.cellFormatBold)
        self.summarySheet.set_column(0, last_col=0, width=15)
        # record the testrun name
        self.testList.append(self.testRunName)
        # Testrecords
        self.__writeSummaryCell("Testrecords", len(self.dataRecords), row=2, format=self.cellFormatBold)
        value = len([x for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS])
        self.testList.append(value)  # Ok
        if not value:
            value = ""
        self.__writeSummaryCell("Successful", value, format=self.cellFormatGreen)
        self.testList.append(value)  # paused
        self.__writeSummaryCell("Paused", len([x for x in self.dataRecords.values()
                                               if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING]))
        value = len([x["Screenshots"] for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR])
        self.testList.append(value)  # error
        if not value:
            value = ""
        self.__writeSummaryCell("Error", value, format=self.cellFormatRed)
        # Logfile
        self.__writeSummaryCell("Logfile", logger.handlers[1].baseFilename, row=7)
        # record the logfile name for the database
        self.testList.append(logger.handlers[1].baseFilename)
        # database id
        self.__writeSummaryCell("Testrun UUID", str(self.testRunInstance.uuid), row=8)
        # Timing
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        self.testRun_end = end  # used while updating the timestamp in the source file
        self.statistics.update_attribute_with_value("Duration", duration)
        self.statistics.update_attribute_with_value("TestRunUUID", str(self.testRunInstance.uuid))
        self.__writeSummaryCell("Starttime", start, row=10)
        # record start and end time
        self.testList.append(start)
        self.testList.append(end)
        self.__writeSummaryCell("Endtime", end)
        self.__writeSummaryCell("Duration", duration, format=self.cellFormatBold)
        self.__writeSummaryCell("Avg. Dur", "")
        # Globals:
        self.__writeSummaryCell("Global settings for this testrun", "", format=self.cellFormatBold, row=15)
        for key, value in self.testRunInstance.globalSettings.items():
            if key.upper() in ["PASSWORD", "PASSWORT", "CONFLUENCE-PASSWORD"]:
                continue
            self.__writeSummaryCell(key, str(value))
            # record global data
            self.testList.append(str(value))
        # Testcase and Testsequence settings
        self.summaryRow += 1
        self.__writeSummaryCell("TestSequence settings follow:", "", format=self.cellFormatBold)
        lSequence = self.testRunInstance.testRunUtils.getSequenceByNumber(testRunName=self.testRunName, sequence="1")
        if lSequence:
            for key, value in lSequence[1].items():
                if isinstance(value, list) or isinstance(value, dict):
                    continue
                self.__writeSummaryCell(key, str(value))

    def __writeSummaryCell(self, lineHeader, lineText, row=None, format=None, image=False):
        if not row:
            self.summaryRow += 1
        else:
            self.summaryRow = row
        if not lineText:
            # If there is no lineText, apply the format to the header cell instead.
            self.summarySheet.write(self.summaryRow, 0, lineHeader, format)
        else:
            self.summarySheet.write(self.summaryRow, 0, lineHeader)
            self.summarySheet.write(self.summaryRow, 1, lineText, format)

    def __getOutputFileName(self):
        l_file = Path(self.testRunInstance.managedPaths.getOrSetExportPath())
        if self.exportFormat == GC.EXP_XLSX:
            lExtension = '.xlsx'
        elif self.exportFormat == GC.EXP_CSV:
            lExtension = '.csv'
        else:
            logger.critical(f"wrong export file format: {self.exportFormat}, using 'xlsx' instead")
            lExtension = '.xlsx'
        l_file = l_file.joinpath("baangt_" + self.testRunName + "_" + utils.datetime_return() + lExtension)
        logger.debug(f"Filename for export: {str(l_file)}")
        return str(l_file)

    def __setHeaderDetailSheetExcel(self):
        # the 1st column is the DB UUID
        self.worksheet.write(0, 0, 'UUID')
        # Add fields with name "RESULT_*" to the output fields.
        i = 1
        self.__extendFieldList()
        for column in self.fieldListExport:
            self.worksheet.write(0, i, column)
            i += 1
        # add JSON field
        self.worksheet.write(0, len(self.fieldListExport) + 1, "JSON")

    def __extendFieldList(self):
        """
        Fields that start with "RESULT_" shall always be exported.
        Other fields that shall always be exported (Testcaseerrorlog, etc.) are also added.
        If the global parameter "TC.ExportAllFields" is set to True, ALL fields will be exported.
        @return:
        """
        if self.testRunInstance.globalSettings.get("TC.ExportAllFields", False):
            self.fieldListExport = []  # Make an empty list, so that we don't have duplicates
            for key in self.dataRecords[0].keys():
                self.fieldListExport.append(key)
            return

        try:
            for key in self.dataRecords[0].keys():
                if "RESULT_" in key.upper():
                    if not key in self.fieldListExport:
                        self.fieldListExport.append(key)
        except Exception as e:
            logger.critical(
                f'looks like we have no data in records: {self.dataRecords}, len of dataRecords: {len(self.dataRecords)}')

        # They are added here, because they'll not necessarily appear in the first record of the export data:
        if not GC.TESTCASEERRORLOG in self.fieldListExport:
            self.fieldListExport.append(GC.TESTCASEERRORLOG)
        if not GC.SCREENSHOTS in self.fieldListExport:
            self.fieldListExport.append(GC.SCREENSHOTS)
        if not GC.EXECUTION_STAGE in self.fieldListExport:
            self.fieldListExport.append(GC.EXECUTION_STAGE)

    def _exportData(self):
        for key, value in self.dataRecords.items():
            # write DB UUID
            try:
                self.worksheet.write(key + 1, 0, str(self.testcase_uuids[key]))
                # write RESULT fields
                for (n, column) in enumerate(self.fieldListExport):
                    self.__writeCell(key + 1, n + 1, value, column)
                # Also write everything as a JSON string into the last column
                self.worksheet.write(key + 1, len(self.fieldListExport) + 1, json.dumps(value))
            except IndexError as e:
                logger.error(f"List of testcase_uuids didn't have a value for {key}. That shouldn't happen!")
            except BaseException as e:
                logger.error(f"Error happened where it shouldn't. Error was {e}")

        # Create autofilter
        self.worksheet.autofilter(0, 0, len(self.dataRecords.items()), len(self.fieldListExport))
        # Make cells wide enough
        for n in range(0, len(self.fieldListExport)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.worksheet, n)

    def update_result_in_testrun(self):
        # Updates the source testrun file with the results.
        logger.debug("TestResult updating")
        testrun_column = self.dataRecords[0]["testcase_column"]
        if testrun_column:  # a non-zero testcase_column means a test result header is present in the source file
            logger.debug(f'Header for result update is {self.dataRecords[0]["testcase_column"]} in sheet '
                         f'{self.dataRecords[0]["testcase_sheet"]} of file {self.dataRecords[0]["testcase_file"]}')
            testrun_file = load_workbook(self.dataRecords[0]["testcase_file"])
            testrun_sheet = testrun_file.get_sheet_by_name(self.dataRecords[0]["testcase_sheet"])
            for key, value in self.dataRecords.items():
                data = f"TestCaseStatus: {value['TestCaseStatus']}\r\n" \
                       f"Timestamp: {self.testRun_end}\r\n" \
                       f"Duration: {value['Duration']}\r\n" \
                       f"TCErrorLog: {value['TCErrorLog']}\r\n" \
                       f"TestRun_UUID: {str(self.testRunInstance.uuid)}\r\n" \
                       f"TestCase_UUID: {str(self.testcase_uuids[key])}\r\n\r\n"
                old_value = testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value or ""
                testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value = data + old_value
                logger.debug(f'Result written in row {value["testcase_row"]} column {value["testcase_column"]}')
            logger.debug("Saving Source TestRun file.")
            testrun_file.save(self.dataRecords[0]["testcase_file"])
            logger.info(f"Source TestRun file {self.dataRecords[0]['testcase_file']} updated.")
        else:
            logger.debug(f"No TestResult column found")

    def __writeCell(self, line, cellNumber, testRecordDict, fieldName, strip=False):
        if fieldName in testRecordDict.keys() and testRecordDict[fieldName]:
            # Convert booleans for output
            if isinstance(testRecordDict[fieldName], bool):
                testRecordDict[fieldName] = "True" if testRecordDict[fieldName] else "False"
            # Remove leading newline:
            if isinstance(testRecordDict[fieldName], str):
                if '\n' in testRecordDict[fieldName][0:5] or strip:
                    testRecordDict[fieldName] = testRecordDict[fieldName].strip()
            # Handle dicts and lists differently:
            if isinstance(testRecordDict[fieldName], dict):
                self.worksheet.write(line, cellNumber, testRecordDict[fieldName])
            elif isinstance(testRecordDict[fieldName], list):
                if fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber,
                                         utils.listToString(testRecordDict[fieldName]))
            else:
                if fieldName == GC.TESTCASESTATUS:
                    if testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatGreen)
                    elif testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatRed)
                elif fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]))

    def __attachScreenshotsToExcelCells(self, cellNumber, fieldName, line, testRecordDict):
        # Place the screenshot images "on" the appropriate cell
        try:
            if type(testRecordDict[fieldName]) == list:
                if Path(testRecordDict[fieldName][-1]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName][-1],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName][-1]} can't be found")
                for nextScreenshotNumber in range(len(testRecordDict[fieldName]) - 1):
                    if Path(testRecordDict[fieldName][nextScreenshotNumber]).is_file():
                        self.worksheet.insert_image(line, len(self.fieldListExport) + nextScreenshotNumber + 1,
                                                    testRecordDict[fieldName][nextScreenshotNumber],
                                                    {'x_scale': 0.05, 'y_scale': 0.05})
                    else:
                        logger.error(f"Screenshot file {testRecordDict[fieldName][nextScreenshotNumber]} can't be found")
            else:
                if Path(testRecordDict[fieldName]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName]} can't be found")
        except Exception as e:
            logger.error(f"Problem with screenshots - can't attach them {e}")
        self.worksheet.set_row(line, 35)

    def closeExcel(self):
        self.workbook.close()
        # The next line doesn't work on macOS ("not authorized"):
        # subprocess.Popen([self.filename], shell=True)


class ExportAdditionalDataIntoTab:
    def __init__(self, tabname, valueDict, outputExcelSheet: xlsxwriter.Workbook):
        self.tab = outputExcelSheet.add_worksheet(tabname)
        self.values = valueDict

    def export(self):
        self.makeHeader()
        self.writeLines()

    def makeHeader(self):
        for cellNumber, entries in self.values.items():
            for column, (key, value) in enumerate(entries.items()):
                self.tab.write(0, column, key)
            break  # Write the header only for the first line.

    def writeLines(self):
        currentLine = 1
        for line, values in self.values.items():
            for column, (key, value) in enumerate(values.items()):
                self.tab.write(currentLine, column, value)
            currentLine += 1


class ExcelSheetHelperFunctions:
    def __init__(self):
        pass

    @staticmethod
    def set_column_autowidth(worksheet: Worksheet, column: int):
        """
        Set the width automatically on a column in the `Worksheet`.
        !!! Make sure you run this function AFTER having all cells filled in
        the worksheet!
        """
        maxwidth = ExcelSheetHelperFunctions.get_column_width(worksheet=worksheet, column=column)
        if maxwidth is None:
            return
        elif maxwidth > 45:
            maxwidth = 45
        worksheet.set_column(first_col=column, last_col=column, width=maxwidth)

    @staticmethod
    def get_column_width(worksheet: Worksheet, column: int) -> Optional[int]:
        """Get the max column width in a `Worksheet` column."""
        strings = getattr(worksheet, '_ts_all_strings', None)
        if strings is None:
            strings = worksheet._ts_all_strings = sorted(
                worksheet.str_table.string_table,
                key=worksheet.str_table.string_table.__getitem__)
        lengths = set()
        for row_id, colums_dict in worksheet.table.items():  # type: int, dict
            data = colums_dict.get(column)
            if not data:
                continue
            if type(data) is cell_string_tuple:
                iter_length = len(strings[data.string])
                if not iter_length:
                    continue
                lengths.add(iter_length)
                continue
            if type(data) is cell_number_tuple:
                iter_length = len(str(data.number))
                if not iter_length:
                    continue
                lengths.add(iter_length)
        if not lengths:
            return None
        return max(lengths)


class ExportNetWork:
    headers = ['BrowserName', 'TestCaseNum', 'Status', 'Method', 'URL', 'ContentType', 'ContentSize', 'Headers',
               'Params', 'Response', 'startDateTime', 'Duration/ms']

    def __init__(self, networkInfo: dict, testCasesEndDateTimes_1D: list,
                 testCasesEndDateTimes_2D: list, workbook: xlsxwriter.Workbook, sheet: xlsxwriter.worksheet):
        self.networkInfo = networkInfo
        # self.testCasesEndDateTimes_1D = testCasesEndDateTimes_1D
        # self.testCasesEndDateTimes_2D = testCasesEndDateTimes_2D
        self.workbook = workbook
        self.sheet = sheet
        header_style = self.get_header_style()
        self.write_header(style=header_style)
        self.set_column_align()
        self.write_content()
        self.set_column_width()

    def set_column_align(self):
        right_align_indexes = list()
        right_align_indexes.append(ExportNetWork.headers.index('ContentSize'))
        right_align_indexes.append(ExportNetWork.headers.index('Duration/ms'))
        right_align_style = self.get_column_style(alignment='right')
        left_align_style = self.get_column_style(alignment='left')
        [self.sheet.set_column(i, i, cell_format=right_align_style) if i in right_align_indexes else
         self.sheet.set_column(i, i, cell_format=left_align_style) for i in range(len(ExportNetWork.headers))]

    def set_column_width(self):
        [ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, i) for i in range(len(ExportNetWork.headers))]

    def get_header_style(self):
        header_style = self.workbook.add_format()
        header_style.set_bg_color("#00CCFF")
        header_style.set_color("#FFFFFF")
        header_style.set_bold()
        header_style.set_border()
        return header_style

    def get_column_style(self, alignment=None):
        column_style = self.workbook.add_format()
        column_style.set_color("black")
        column_style.set_align('right') if alignment == 'right' \
            else column_style.set_align('left') if alignment == 'left' else None
        column_style.set_border()
        return column_style

    def write_header(self, style=None):
        for index, value in enumerate(ExportNetWork.headers):
            self.sheet.write(0, index, value, style)

    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'

    def write_content(self):
        if not self.networkInfo:
            return
        # partition_index = 0
        for index in range(len(self.networkInfo)):
            data_list = [
                self.networkInfo[index]['browserName'],
                self.networkInfo[index]['testcase'],
                self.networkInfo[index]['status'],
                self.networkInfo[index]['method'],
                self.networkInfo[index]['url'],
                self.networkInfo[index]['contentType'],
                self.networkInfo[index]['contentSize'],
                self.networkInfo[index]['headers'],
                self.networkInfo[index]['params'],
                self.networkInfo[index]['response'],
                self.networkInfo[index]['startDateTime'],
                self.networkInfo[index]['duration'],
            ]
            for i in range(len(data_list)):
                self.sheet.write(index + 1, i, str(data_list[i]) or 'null')


class ExportTiming:
    def __init__(self, testdataRecords: dict, sheet: xlsxwriter.worksheet):
        self.testdataRecords = testdataRecords
        self.sheet: xlsxwriter.worksheet = sheet
        self.sections = {}

        self.findAllTimingSections()
        self.writeHeader()
        self.writeLines()

        # Autowidth
        for n in range(0, len(self.sections) + 1):
            ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, n)

    def writeHeader(self):
        self.wc(0, 0, "Testcase#")
        for index, key in enumerate(self.sections.keys(), start=1):
            self.wc(0, index, key)

    def writeLines(self):
        for tcNumber, (key, line) in enumerate(self.testdataRecords.items(), start=1):
            self.wc(tcNumber, 0, tcNumber)
            lSections = self.interpretTimeLog(line[GC.TIMELOG])
            for section, timingValue in lSections.items():
                # find in which column this section should be written:
                for column, key in enumerate(self.sections.keys(), 1):
                    if key == section:
                        self.wc(tcNumber, column,
                                timingValue[GC.TIMING_DURATION])
                        continue

    @staticmethod
    def shortenTimingValue(timingValue):
        # The timing value is seconds as float. Two decimals are enough:
        timingValue = int(float(timingValue) * 100)
        return timingValue / 100

    def writeCell(self, row, col, content, format=None):
        self.sheet.write(row, col, content, format)

    wc = writeCell

    def findAllTimingSections(self):
        """
        Builds an ordered list of timing sections. As each testcase might have different sections,
        we have to make guesses.
        @return:
        """
        lSections = {}
        for key, line in self.testdataRecords.items():
            lTiming: dict = ExportTiming.interpretTimeLog(line[GC.TIMELOG])
            for key in lTiming.keys():
                if lSections.get(key):
                    continue
                else:
                    lSections[key] = None
        self.sections = lSections

    @staticmethod
    def interpretTimeLog(lTimeLog):
        """Example time log:
        Complete Testrun: Start: 1579553837.241974 - no end recorded
        TestCaseSequenceMaster: Start: 1579553837.243414 - no end recorded
        CustTestCaseMaster: Start: 1579553838.97329 - no end recorded
        Browser Start: , since last call: 2.3161418437957764
        Empfehlungen: , since last call: 6.440968036651611, ZIDs:[175aeac023237a73], TS:2020-01-20 21:57:46.525577
        Annahme_RABAZ: , since last call: 2.002716064453125e-05, ZIDs:[6be7d0a44e59acf6], TS:2020-01-20 21:58:37.203583
        Antrag drucken: , since last call: 9.075241088867188, ZIDs:[6be7d0a44e59acf6, b27c3875ddcbb4fa], TS:2020-01-20 21:58:38.040137
        Warten auf Senden an Bestand Button: , since last call: 1.3927149772644043
        Senden an Bestand: , since last call: 9.60469913482666, ZIDs:[66b12fa4869cf8a0, ad1f3d47c4694e26], TS:2020-01-20 21:58:49.472288

        The part before the first ":" is the section, "since last call:" is the duration and "TS:" is the timestamp.
        Update 29.3.2020: format changed to "since last call: 00:xx:xx,"; the rest looks identical.
        """
        lExport = {}
        lLines = lTimeLog.split("\n")
        for line in lLines:
            parts = line.split(",")
            if len(parts) < 2:
                continue
            if "Start:" in line:
                # Format <sequence>: <Start>: <time.loctime>
                continue
            else:
                lSection = parts[0].replace(":", "").strip()
                lDuration = parts[1].split("since last call: ")[1]
                lExport[lSection] = {GC.TIMING_DURATION: lDuration}
        return lExport
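

# Minimal usage sketch (assumption: invoked by a baangt TestRun after execution; the exact kwargs and
# variable names below are illustrative only and depend on the calling TestRun implementation):
#
#   from baangt.base.ExportResults.ExportResults import ExportResults
#
#   ExportResults(**{
#       GC.KWARGS_TESTRUNINSTANCE: testRunInstance,        # finished TestRun object (provides dataRecords, timing, uuid)
#       GC.KWARGS_TESTRUNATTRIBUTES: testRunAttributes,    # includes GC.EXPORT_FORMAT with the "Fieldlist"
#       'testCasesEndDateTimes_1D': endDateTimes1D,        # optional, used to map network entries to testcases
#       'testCasesEndDateTimes_2D': endDateTimes2D,        # optional, per-browser variant of the above
#       'networkInfo': harLogs,                            # optional HAR-style network log dicts
#   })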