ExportResults.py

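"""
Export of test run results for baangt: builds the result workbook (Summary, Output,
<stage>_JSON, Timing and Network sheets) or a CSV file, persists the run via SQLAlchemy
to the results database, and can write the outcome back into the source test run file.
"""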
import xlsxwriter
import logging
import json
import baangt.base.GlobalConstants as GC
from baangt.base.Timing.Timing import Timing
from baangt.base.Utils import utils
from baangt.base.ExportResults.Append2BaseXLS import Append2BaseXLS
from pathlib import Path
from typing import Optional
from xlsxwriter.worksheet import (
    Worksheet, cell_number_tuple, cell_string_tuple)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from baangt.base.DataBaseORM import DATABASE_URL, TestrunLog, TestCaseSequenceLog
from baangt.base.DataBaseORM import TestCaseLog, TestCaseField, GlobalAttribute, TestCaseNetworkInfo
from datetime import datetime
from sqlite3 import IntegrityError
from baangt import plugin_manager
import re
import csv
from dateutil.parser import parse
from uuid import uuid4
from baangt.base.ExportResults.SendStatistics import Statistics
from baangt.base.RuntimeStatistics import Statistic
from openpyxl import load_workbook

logger = logging.getLogger("pyC")


class ExportResults:
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.testList = []
        self.fileName = None
        self.testRunInstance = kwargs.get(GC.KWARGS_TESTRUNINSTANCE)
        self.testCasesEndDateTimes_1D = kwargs.get('testCasesEndDateTimes_1D')
        self.testCasesEndDateTimes_2D = kwargs.get('testCasesEndDateTimes_2D')
        self.networkInfo = self._get_network_info(kwargs.get('networkInfo'))
        self.testRunName = self.testRunInstance.testRunName
        self.dataRecords = self.testRunInstance.dataRecords
        self.stage = self.__getStageFromDataRecordsOrGlobalSettings()
        self.statistics = Statistics()
        self.statistics.update_data(kwargs)
        self.statistics.update_runtimeStatistic()
        self.logfile = logger.handlers[1].baseFilename
        try:
            self.exportFormat = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)
            if isinstance(self.exportFormat, dict):
                self.exportFormat = self.exportFormat.get(GC.EXPORT_FORMAT)
            if not self.exportFormat:
                self.exportFormat = GC.EXP_XLSX
        except KeyError:
            self.exportFormat = GC.EXP_XLSX
        try:
            if kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME):
                self.fileName = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME)
        except Exception as e:
            # fixme: I don't know why this error occurs. When a Filename is set, the above works.
            # No time now to debug.
            pass
        if not self.fileName:
            self.fileName = self.__getOutputFileName()
        logger.info("Export-Sheet for results: " + self.fileName)
        self.__removeUnwantedFields()  # Will remove Password-Contents AND fields from data records, that came from Globals-File.
        # export results to DB
        self.testcase_uuids = []
        self.exportToDataBase()
        if self.exportFormat == GC.EXP_XLSX:
            self.fieldListExport = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)["Fieldlist"]
            self.workbook = xlsxwriter.Workbook(self.fileName)
            self.summarySheet = self.workbook.add_worksheet("Summary")
            self.worksheet = self.workbook.add_worksheet("Output")
            self.jsonSheet = self.workbook.add_worksheet(f"{self.stage}_JSON")
            self.timingSheet = self.workbook.add_worksheet("Timing")
            self.cellFormatGreen = self.workbook.add_format()
            self.cellFormatGreen.set_bg_color('green')
            self.cellFormatRed = self.workbook.add_format()
            self.cellFormatRed.set_bg_color('red')
            self.cellFormatBold = self.workbook.add_format()
            self.cellFormatBold.set_bold(bold=True)
            self.summaryRow = 0
            self.__setHeaderDetailSheetExcel()
            self.makeSummaryExcel()
            self.exportResultExcel()
            self.exportJsonExcel()
            self.exportAdditionalData()
            self.write_json_sheet()
            self.exportTiming = ExportTiming(self.dataRecords,
                                             self.timingSheet)
            if self.networkInfo:
                self.networkSheet = self.workbook.add_worksheet("Network")
                self.exportNetWork = ExportNetWork(self.networkInfo,
                                                   self.testCasesEndDateTimes_1D,
                                                   self.testCasesEndDateTimes_2D,
                                                   self.workbook,
                                                   self.networkSheet)
            self.closeExcel()
            # Call functionality for potentially exporting data to other sheets/databases
            Append2BaseXLS(self.testRunInstance, self.fileName)
        elif self.exportFormat == GC.EXP_CSV:
            self.export2CSV()
        if self.testRunInstance.globalSettings.get("DeactivateStatistics") == "True":
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        elif self.testRunInstance.globalSettings.get("DeactivateStatistics") is True:
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        else:
            try:
                self.statistics.send_statistics()
            except Exception as ex:
                logger.debug(ex)
        self.update_result_in_testrun()

    def __removeUnwantedFields(self):
        lListPasswordFieldNames = ["PASSWORD", "PASSWORT", "PASSW"]
        if not self.testRunInstance.globalSettings.get("LetPasswords"):
            # If there's a password in GlobalSettings, remove the value:
            for key, value in self.testRunInstance.globalSettings.items():
                if key.upper() in lListPasswordFieldNames:
                    self.testRunInstance.globalSettings[key] = "*" * 8
            # If there's a password in the datafile, remove the value
            # Also remove all columns, that are anyway included in the global settings
            for key, fields in self.dataRecords.items():
                fieldsToPop = []
                for field, value in fields.items():
                    if field.upper() in lListPasswordFieldNames:
                        self.dataRecords[key][field] = "*" * 8
                    if field in self.testRunInstance.globalSettings.keys():
                        fieldsToPop.append(field)
                for field in fieldsToPop:
                    if field != 'Screenshots' and field != 'Stage':  # Stage and Screenshot are needed in output file
                        fields.pop(field)

    def exportAdditionalData(self):
        # Runs only, when KWARGS-Parameter is set.
        if self.kwargs.get(GC.EXPORT_ADDITIONAL_DATA):
            addExportData = self.kwargs[GC.EXPORT_ADDITIONAL_DATA]
            # Loop over the items. KEY = Tabname, Value = Data to be exported.
            # For data KEY = Fieldname, Value = Cell-Value
            for key, value in addExportData.items():
                lExport = ExportAdditionalDataIntoTab(tabname=key, valueDict=value, outputExcelSheet=self.workbook)
                lExport.export()

    def __getStageFromDataRecordsOrGlobalSettings(self):
        """
        If "STAGE" is not provided in the data fields (should actually not happen, but who knows),
        we shall take it from GlobalSettings. If also not there, take the default value GC.EXECUTION_STAGE_TEST.
        :return:
        """
        value = None
        for key, value in self.dataRecords.items():
            break
        if not value.get(GC.EXECUTION_STAGE):
            stage = self.testRunInstance.globalSettings.get('TC.Stage', GC.EXECUTION_STAGE_TEST)
        else:
            stage = value.get(GC.EXECUTION_STAGE)
        return stage

    def export2CSV(self):
        """
        Writes CSV-File of datarecords
        """
        f = open(self.fileName, 'w', encoding='utf-8-sig', newline='')
        writer = csv.DictWriter(f, self.dataRecords[0].keys())
        writer.writeheader()
        for i in range(len(self.dataRecords)):  # iterate over all records (the former "- 1" skipped the last one)
            writer.writerow(self.dataRecords[i])
        f.close()
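
    # Persists the complete run to the database configured in DATABASE_URL: one TestrunLog
    # with GlobalAttribute entries, a TestCaseSequenceLog, one TestCaseLog (plus TestCaseField
    # rows) per data record, and TestCaseNetworkInfo entries when network data was captured.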
    def exportToDataBase(self):
        #
        # writes results to DB
        #
        logger.info(f'Export results to database at: {DATABASE_URL}')
        engine = create_engine(DATABASE_URL)
        # create a Session
        Session = sessionmaker(bind=engine)
        session = Session()
        # get timings
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        # get status
        success = 0
        error = 0
        waiting = 0
        for value in self.dataRecords.values():
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                success += 1
            elif value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                error += 1
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING:
                waiting += 1
        self.statistics.update_attribute_with_value("TestCasePassed", success)
        self.statistics.update_attribute_with_value("TestCaseFailed", error)
        self.statistics.update_attribute_with_value("TestCasePaused", waiting)
        self.statistics.update_attribute_with_value("TestCaseExecuted", success + error + waiting)
        try:
            dic = {}
            for key in self.testRunInstance.json_dict:
                if "$(" in key:
                    dic[key[2:-1]] = self.testRunInstance.json_dict[key]
            json_data = json.dumps(dic)
        except Exception as ex:
            logger.info(f"RLP Json error while updating in db : {str(ex)}")
            json_data = ""
        # get documents
        datafiles = self.fileName
        # create testrun object
        tr_log = TestrunLog(
            id=self.testRunInstance.uuid.bytes,
            testrunName=self.testRunName,
            logfileName=self.logfile,
            startTime=datetime.strptime(start, "%d-%m-%Y %H:%M:%S"),
            endTime=datetime.strptime(end, "%d-%m-%Y %H:%M:%S"),
            statusOk=success,
            statusFailed=error,
            statusPaused=waiting,
            dataFile=self.fileName,
            RLPJson=json_data,
        )
        # add to DataBase
        session.add(tr_log)
        # set globals
        for key, value in self.testRunInstance.globalSettings.items():
            globalVar = GlobalAttribute(
                name=key,
                value=str(value),
                testrun=tr_log,
            )
            session.add(globalVar)
        self.__save_commit(session)
        # create testcase sequence instance
        tcs_log = TestCaseSequenceLog(testrun=tr_log, number=1)
        # create testcases
        for tc_number, tc in enumerate(self.dataRecords.values(), 1):
            # get uuid
            uuid = uuid4()
            # create TestCaseLog instances
            tc_log = TestCaseLog(
                id=uuid.bytes,
                testcase_sequence=tcs_log,
                number=tc_number,
            )
            # store uuid
            self.testcase_uuids.append(uuid)
            session.add(tc_log)
            # add TestCase fields
            for key, value in tc.items():
                field = TestCaseField(name=key, value=str(value), testcase=tc_log)
                session.add(field)
        self.__save_commit(session)
        # network info
        if self.networkInfo:
            for entry in self.networkInfo:
                if type(entry.get('testcase')) == type(1):
                    nw_info = TestCaseNetworkInfo(
                        testcase=tcs_log.testcases[entry.get('testcase') - 1],
                        browserName=entry.get('browserName'),
                        status=entry.get('status'),
                        method=entry.get('method'),
                        url=entry.get('url'),
                        contentType=entry.get('contentType'),
                        contentSize=entry.get('contentSize'),
                        headers=str(entry.get('headers')),
                        params=str(entry.get('params')),
                        response=entry.get('response'),
                        startDateTime=datetime.strptime(entry.get('startDateTime')[:19], '%Y-%m-%dT%H:%M:%S'),
                        duration=entry.get('duration'),
                    )
                    session.add(nw_info)
            self.__save_commit(session)

    def __save_commit(self, session):
        try:
            session.commit()
        except IntegrityError as e:
            logger.critical(f"Integrity Error during commit to database: {e}")
        except Exception as e:
            logger.critical(f"Unknown error during database commit: {e}")
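
    # Maps a network entry's start time to a test case number by comparing it against the
    # recorded test case end times (1D list for a single browser, 2D list per browser instance).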
    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'

    def _get_network_info(self, networkInfoDict):
        #
        # extracts network info data from the given dict
        #
        if networkInfoDict:
            extractedNetworkInfo = []
            for info in networkInfoDict:
                # extractedEntry = {}
                for entry in info['log']['entries']:
                    # extract the current entry
                    extractedNetworkInfo.append({
                        'testcase': self._get_test_case_num(entry['startedDateTime'], entry['pageref']),
                        'browserName': entry.get('pageref'),
                        'status': entry['response'].get('status'),
                        'method': entry['request'].get('method'),
                        'url': entry['request'].get('url'),
                        'contentType': entry['response']['content'].get('mimeType'),
                        'contentSize': entry['response']['content'].get('size'),
                        'headers': entry['response']['headers'],
                        'params': entry['request']['queryString'],
                        'response': entry['response']['content'].get('text'),
                        'startDateTime': entry['startedDateTime'],
                        'duration': entry.get('time'),
                    })
            return extractedNetworkInfo
        return None

    def exportResultExcel(self, **kwargs):
        self._exportData()
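
    # Writes one row per (test case, field) pair into the "<stage>_JSON" sheet:
    # Stage, test case UUID, attribute name and value.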
    def exportJsonExcel(self):
        # headers
        headers = [
            'Stage',
            'UUID',
            'Attribute',
            'Value',
        ]
        # header style
        header_style = self.workbook.add_format()
        header_style.set_bold()
        # write header
        for index in range(len(headers)):
            self.jsonSheet.write(0, index, headers[index], header_style)
        # write data
        row = 0
        for index, testcase in self.dataRecords.items():
            # add TestCase fields
            for key, value in testcase.items():
                row += 1
                self.jsonSheet.write(row, 0, self.stage)
                self.jsonSheet.write(row, 1, str(self.testcase_uuids[index]))
                self.jsonSheet.write(row, 2, key)
                self.jsonSheet.write(row, 3, str(value))
        # Autowidth
        for n in range(len(headers)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.jsonSheet, n)
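
    # Writes each RLP-JSON structure from testRunInstance.json_dict into its own
    # "<stage>_<name>" worksheet; nested lists/dicts are dropped from the headers.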
    def write_json_sheet(self):
        # Used to write rlp_ json in individual sheets
        dic = self.testRunInstance.json_dict
        for js in dic:
            if not js:
                continue
            elif js[:2] == "$(":
                name = js[2:-1]
            else:
                name = js
            jsonSheet = self.workbook.add_worksheet(f"{self.stage}_{name}")
            if type(dic[js][0]) == dict:  # Condition to get dictionary or dictionary inside list to write headers
                data_dic = dic[js][0]
            elif type(dic[js][0][0]) == dict:
                data_dic = dic[js][0][0]
            else:
                logger.debug(f"{dic[js]} is not json convertible.")
                continue
            remove_header = []
            for key in data_dic:  # Removing headers which consist of nested data
                if type(data_dic[key]) == list or type(data_dic[key]) == dict:
                    remove_header.append(key)
            for key in remove_header:
                del data_dic[key]
            headers = []
            for index, header in enumerate(data_dic):
                jsonSheet.write(0, index, header)
                headers.append(header)
            row = 1
            for data in dic[js]:
                if not data:
                    continue
                dt = {}
                for y, dt in enumerate(data):
                    if type(dt) != dict:  # for single dictionary data
                        jsonSheet.write(row, y, data[dt])
                    else:  # if dictionaries are inside list
                        column = 0  # used to update individual column
                        for d in dt:
                            if d not in headers:
                                continue
                            try:
                                jsonSheet.write(row, column, dt[d])
                            except Exception as ex:
                                print(ex)
                            column += 1
                        row += 1
                if type(dt) != dict:
                    row += 1
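
    # Builds the "Summary" sheet: record counts per status, logfile, testrun UUID,
    # timing, global settings and the settings of the first test sequence.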
    def makeSummaryExcel(self):
        self.summarySheet.write(0, 0, f"Testreport for {self.testRunName}", self.cellFormatBold)
        self.summarySheet.set_column(0, last_col=0, width=15)
        # get testrunname my
        self.testList.append(self.testRunName)
        # Testrecords
        self.__writeSummaryCell("Testrecords", len(self.dataRecords), row=2, format=self.cellFormatBold)
        value = len([x for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS])
        self.testList.append(value)  # Ok my
        if not value:
            value = ""
        self.__writeSummaryCell("Successful", value, format=self.cellFormatGreen)
        self.testList.append(value)  # paused my
        self.__writeSummaryCell("Paused", len([x for x in self.dataRecords.values()
                                               if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING]))
        value = len([x["Screenshots"] for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR])
        self.testList.append(value)  # error my
        if not value:
            value = ""
        self.__writeSummaryCell("Error", value, format=self.cellFormatRed)
        # Logfile
        self.__writeSummaryCell("Logfile", logger.handlers[1].baseFilename, row=7)
        # get logfilename for database my
        self.testList.append(logger.handlers[1].baseFilename)
        # database id
        self.__writeSummaryCell("Testrun UUID", str(self.testRunInstance.uuid), row=8)
        # Timing
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        self.testRun_end = end  # used while updating timestamp in source file
        self.statistics.update_attribute_with_value("Duration", duration)
        self.statistics.update_attribute_with_value("TestRunUUID", str(self.testRunInstance.uuid))
        self.__writeSummaryCell("Starttime", start, row=10)
        # get start end during time my
        self.testList.append(start)
        self.testList.append(end)
        self.__writeSummaryCell("Endtime", end)
        self.__writeSummaryCell("Duration", duration, format=self.cellFormatBold)
        self.__writeSummaryCell("Avg. Dur", "")
        # Globals:
        self.__writeSummaryCell("Global settings for this testrun", "", format=self.cellFormatBold, row=15)
        for key, value in self.testRunInstance.globalSettings.items():
            if key.upper() in ["PASSWORD", "PASSWORT", "CONFLUENCE-PASSWORD"]:
                continue
            self.__writeSummaryCell(key, str(value))
            # get global data my
            self.testList.append(str(value))
        # Testcase and Testsequence setting
        self.summaryRow += 1
        self.__writeSummaryCell("TestSequence settings follow:", "", format=self.cellFormatBold)
        lSequence = self.testRunInstance.testRunUtils.getSequenceByNumber(testRunName=self.testRunName, sequence="1")
        if lSequence:
            for key, value in lSequence[1].items():
                if isinstance(value, list) or isinstance(value, dict):
                    continue
                self.__writeSummaryCell(key, str(value))
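
    # Writes a header/value pair to the summary sheet. Without an explicit row the internal
    # row counter advances; when lineText is empty the format is applied to the header cell.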
    def __writeSummaryCell(self, lineHeader, lineText, row=None, format=None, image=False):
        if not row:
            self.summaryRow += 1
        else:
            self.summaryRow = row
        if not lineText:
            # If we have no lineText we want to apply format to the Header
            self.summarySheet.write(self.summaryRow, 0, lineHeader, format)
        else:
            self.summarySheet.write(self.summaryRow, 0, lineHeader)
            self.summarySheet.write(self.summaryRow, 1, lineText, format)

    def __getOutputFileName(self):
        l_file = Path(self.testRunInstance.managedPaths.getOrSetExportPath())
        if self.exportFormat == GC.EXP_XLSX:
            lExtension = '.xlsx'
        elif self.exportFormat == GC.EXP_CSV:
            lExtension = '.csv'
        else:
            logger.critical(f"wrong export file format: {self.exportFormat}, using 'xlsx' instead")
            lExtension = '.xlsx'
        l_file = l_file.joinpath("baangt_" + self.testRunName + "_" + utils.datetime_return() + lExtension)
        logger.debug(f"Filename for export: {str(l_file)}")
        return str(l_file)

    def __setHeaderDetailSheetExcel(self):
        # the 1st column is DB UUID
        self.worksheet.write(0, 0, 'UUID')
        # Add fields with name "RESULT_*" to output fields.
        i = 1
        self.__extendFieldList()
        for column in self.fieldListExport:
            self.worksheet.write(0, i, column)
            i += 1
        # add JSON field
        self.worksheet.write(0, len(self.fieldListExport) + 1, "JSON")

    def __extendFieldList(self):
        """
        Fields that start with "RESULT_" shall always be exported.
        Other fields that shall always be exported are also added (Testcaseerrorlog, etc.).
        If the global parameter "TC.ExportAllFields" is set to True, ALL fields will be exported.
        @return:
        """
        if self.testRunInstance.globalSettings.get("TC.ExportAllFields", False):
            self.fieldListExport = []  # Make an empty list, so that we don't have duplicates
            for key in self.dataRecords[0].keys():
                self.fieldListExport.append(key)
            return
        try:
            for key in self.dataRecords[0].keys():
                if "RESULT_" in key.upper():
                    if not key in self.fieldListExport:
                        self.fieldListExport.append(key)
        except Exception as e:
            logger.critical(
                f'looks like we have no data in records: {self.dataRecords}, len of dataRecords: {len(self.dataRecords)}')
        # They are added here, because they'll not necessarily appear in the first record of the export data:
        if not GC.TESTCASEERRORLOG in self.fieldListExport:
            self.fieldListExport.append(GC.TESTCASEERRORLOG)
        if not GC.SCREENSHOTS in self.fieldListExport:
            self.fieldListExport.append(GC.SCREENSHOTS)
        if not GC.EXECUTION_STAGE in self.fieldListExport:
            self.fieldListExport.append(GC.EXECUTION_STAGE)
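
    # Writes one row per data record to the "Output" sheet: test case UUID, the fields from
    # fieldListExport and the whole record as a JSON string; finally adds autofilter and autowidth.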
    def _exportData(self):
        for key, value in self.dataRecords.items():
            # write DB UUID
            try:
                self.worksheet.write(key + 1, 0, str(self.testcase_uuids[key]))
                # write RESULT fields
                for (n, column) in enumerate(self.fieldListExport):
                    self.__writeCell(key + 1, n + 1, value, column)
                # Also write everything as JSON-String into the last column
                self.worksheet.write(key + 1, len(self.fieldListExport) + 1, json.dumps(value))
            except IndexError as e:
                logger.error(f"List of testcase_uuids didn't have a value for {key}. That shouldn't happen!")
            except BaseException as e:
                logger.error(f"Error happened where it shouldn't. Error was {e}")
        # Create autofilter
        self.worksheet.autofilter(0, 0, len(self.dataRecords.items()), len(self.fieldListExport))
        # Make cells wide enough
        for n in range(0, len(self.fieldListExport)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.worksheet, n)

    def update_result_in_testrun(self):
        # To update source testrun file
        testrun_column = self.dataRecords[0]["testcase_column"]
        if testrun_column:  # if testrun_column is greater than 0, the testresult header is present in the source file
            testrun_file = load_workbook(self.dataRecords[0]["testcase_file"])
            testrun_sheet = testrun_file.get_sheet_by_name(self.dataRecords[0]["testcase_sheet"])
            for key, value in self.dataRecords.items():
                print(value)
                data = f"TestCaseStatus: {value['TestCaseStatus']}\r\n" \
                       f"Timestamp: {self.testRun_end}\r\n" \
                       f"Duration: {value['Duration']}\r\n" \
                       f"TCErrorLog: {value['TCErrorLog']}\r\n" \
                       f"TestRun_UUID: {str(self.testRunInstance.uuid)}\r\n" \
                       f"TestCase_UUID: {str(self.testcase_uuids[key])}\r\n\r\n"
                old_value = testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value or ""
                testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value = data + old_value
            testrun_file.save(self.dataRecords[0]["testcase_file"])
            logger.info(f"Source TestRun file {self.dataRecords[0]['testcase_file']} updated.")
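
    # Writes a single output cell: converts booleans, strips leading newlines, flattens lists,
    # colors TESTCASESTATUS cells green/red and delegates screenshots to __attachScreenshotsToExcelCells.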
    def __writeCell(self, line, cellNumber, testRecordDict, fieldName, strip=False):
        if fieldName in testRecordDict.keys() and testRecordDict[fieldName]:
            # Convert boolean for Output
            if isinstance(testRecordDict[fieldName], bool):
                testRecordDict[fieldName] = "True" if testRecordDict[fieldName] else "False"
            # Remove leading New-Line:
            if isinstance(testRecordDict[fieldName], str):
                if '\n' in testRecordDict[fieldName][0:5] or strip:
                    testRecordDict[fieldName] = testRecordDict[fieldName].strip()
            # Do different stuff for Dicts and Lists:
            if isinstance(testRecordDict[fieldName], dict):
                self.worksheet.write(line, cellNumber, testRecordDict[fieldName])
            elif isinstance(testRecordDict[fieldName], list):
                if fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber,
                                         utils.listToString(testRecordDict[fieldName]))
            else:
                if fieldName == GC.TESTCASESTATUS:
                    if testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatGreen)
                    elif testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatRed)
                elif fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]))

    def __attachScreenshotsToExcelCells(self, cellNumber, fieldName, line, testRecordDict):
        # Place the screenshot images "on" the appropriate cell
        try:
            if type(testRecordDict[fieldName]) == list:
                if Path(testRecordDict[fieldName][-1]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName][-1],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName][-1]} can't be found")
                for nextScreenshotNumber in range(len(testRecordDict[fieldName]) - 1):
                    if Path(testRecordDict[fieldName][nextScreenshotNumber]).is_file():
                        self.worksheet.insert_image(line, len(self.fieldListExport) + nextScreenshotNumber + 1,
                                                    testRecordDict[fieldName][nextScreenshotNumber],
                                                    {'x_scale': 0.05, 'y_scale': 0.05})
                    else:
                        logger.error(f"Screenshot file {testRecordDict[fieldName][nextScreenshotNumber]} can't be found")
            else:
                if Path(testRecordDict[fieldName]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName]} can't be found")
        except Exception as e:
            logger.error(f"Problem with screenshots - can't attach them {e}")
        self.worksheet.set_row(line, 35)

    def closeExcel(self):
        self.workbook.close()
        # Next line doesn't work on MAC. Returns "not authorized"
        # subprocess.Popen([self.filename], shell=True)
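

# Writes additional export data into its own worksheet: the outer dict provides the lines,
# the inner dicts provide column headers (keys) and cell values.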
class ExportAdditionalDataIntoTab:
    def __init__(self, tabname, valueDict, outputExcelSheet: xlsxwriter.Workbook):
        self.tab = outputExcelSheet.add_worksheet(tabname)
        self.values = valueDict

    def export(self):
        self.makeHeader()
        self.writeLines()

    def makeHeader(self):
        for cellNumber, entries in self.values.items():
            for column, (key, value) in enumerate(entries.items()):
                self.tab.write(0, column, key)
            break  # Write header only for first line.

    def writeLines(self):
        currentLine = 1
        for line, values in self.values.items():
            for column, (key, value) in enumerate(values.items()):
                self.tab.write(currentLine, column, value)
            currentLine += 1


class ExcelSheetHelperFunctions:
    def __init__(self):
        pass

    @staticmethod
    def set_column_autowidth(worksheet: Worksheet, column: int):
        """
        Set the width automatically on a column in the `Worksheet`.
        !!! Make sure you run this function AFTER having all cells filled in
        the worksheet!
        """
        maxwidth = ExcelSheetHelperFunctions.get_column_width(worksheet=worksheet, column=column)
        if maxwidth is None:
            return
        elif maxwidth > 45:
            maxwidth = 45
        worksheet.set_column(first_col=column, last_col=column, width=maxwidth)

    @staticmethod
    def get_column_width(worksheet: Worksheet, column: int) -> Optional[int]:
        """Get the max column width in a `Worksheet` column."""
        strings = getattr(worksheet, '_ts_all_strings', None)
        if strings is None:
            strings = worksheet._ts_all_strings = sorted(
                worksheet.str_table.string_table,
                key=worksheet.str_table.string_table.__getitem__)
        lengths = set()
        for row_id, colums_dict in worksheet.table.items():  # type: int, dict
            data = colums_dict.get(column)
            if not data:
                continue
            if type(data) is cell_string_tuple:
                iter_length = len(strings[data.string])
                if not iter_length:
                    continue
                lengths.add(iter_length)
                continue
            if type(data) is cell_number_tuple:
                iter_length = len(str(data.number))
                if not iter_length:
                    continue
                lengths.add(iter_length)
        if not lengths:
            return None
        return max(lengths)
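

# Renders the extracted network (HAR) entries into the "Network" sheet with a styled
# header row, per-column alignment and automatic column widths.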
class ExportNetWork:
    headers = ['BrowserName', 'TestCaseNum', 'Status', 'Method', 'URL', 'ContentType', 'ContentSize', 'Headers',
               'Params', 'Response', 'startDateTime', 'Duration/ms']

    def __init__(self, networkInfo: dict, testCasesEndDateTimes_1D: list,
                 testCasesEndDateTimes_2D: list, workbook: xlsxwriter.Workbook, sheet: xlsxwriter.worksheet):
        self.networkInfo = networkInfo
        # self.testCasesEndDateTimes_1D = testCasesEndDateTimes_1D
        # self.testCasesEndDateTimes_2D = testCasesEndDateTimes_2D
        self.workbook = workbook
        self.sheet = sheet
        header_style = self.get_header_style()
        self.write_header(style=header_style)
        self.set_column_align()
        self.write_content()
        self.set_column_width()

    def set_column_align(self):
        right_align_indexes = list()
        right_align_indexes.append(ExportNetWork.headers.index('ContentSize'))
        right_align_indexes.append(ExportNetWork.headers.index('Duration/ms'))
        right_align_style = self.get_column_style(alignment='right')
        left_align_style = self.get_column_style(alignment='left')
        [self.sheet.set_column(i, i, cell_format=right_align_style) if i in right_align_indexes else
         self.sheet.set_column(i, i, cell_format=left_align_style) for i in range(len(ExportNetWork.headers))]

    def set_column_width(self):
        [ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, i) for i in range(len(ExportNetWork.headers))]

    def get_header_style(self):
        header_style = self.workbook.add_format()
        header_style.set_bg_color("#00CCFF")
        header_style.set_color("#FFFFFF")
        header_style.set_bold()
        header_style.set_border()
        return header_style

    def get_column_style(self, alignment=None):
        column_style = self.workbook.add_format()
        column_style.set_color("black")
        column_style.set_align('right') if alignment == 'right' \
            else column_style.set_align('left') if alignment == 'left' else None
        column_style.set_border()
        return column_style

    def write_header(self, style=None):
        for index, value in enumerate(ExportNetWork.headers):
            self.sheet.write(0, index, value, style)

    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'

    def write_content(self):
        if not self.networkInfo:
            return
        # partition_index = 0
        for index in range(len(self.networkInfo)):
            data_list = [
                self.networkInfo[index]['browserName'],
                self.networkInfo[index]['testcase'],
                self.networkInfo[index]['status'],
                self.networkInfo[index]['method'],
                self.networkInfo[index]['url'],
                self.networkInfo[index]['contentType'],
                self.networkInfo[index]['contentSize'],
                self.networkInfo[index]['headers'],
                self.networkInfo[index]['params'],
                self.networkInfo[index]['response'],
                self.networkInfo[index]['startDateTime'],
                self.networkInfo[index]['duration'],
            ]
            for i in range(len(data_list)):
                self.sheet.write(index + 1, i, str(data_list[i]) or 'null')
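

# Parses the GC.TIMELOG string of every test record and writes one column per timing
# section (duration only) into the "Timing" sheet.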
class ExportTiming:
    def __init__(self, testdataRecords: dict, sheet: xlsxwriter.worksheet):
        self.testdataRecords = testdataRecords
        self.sheet: xlsxwriter.worksheet = sheet
        self.sections = {}
        self.findAllTimingSections()
        self.writeHeader()
        self.writeLines()
        # Autowidth
        for n in range(0, len(self.sections) + 1):
            ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, n)

    def writeHeader(self):
        self.wc(0, 0, "Testcase#")
        for index, key in enumerate(self.sections.keys(), start=1):
            self.wc(0, index, key)

    def writeLines(self):
        for tcNumber, (key, line) in enumerate(self.testdataRecords.items(), start=1):
            self.wc(tcNumber, 0, tcNumber)
            lSections = self.interpretTimeLog(line[GC.TIMELOG])
            for section, timingValue in lSections.items():
                # find, in which column this section should be written:
                for column, key in enumerate(self.sections.keys(), 1):
                    if key == section:
                        self.wc(tcNumber, column,
                                timingValue[GC.TIMING_DURATION])
                        continue

    @staticmethod
    def shortenTimingValue(timingValue):
        # TimingValue is seconds in Float. 2 decimals is enough:
        timingValue = int(float(timingValue) * 100)
        return timingValue / 100

    def writeCell(self, row, col, content, format=None):
        self.sheet.write(row, col, content, format)

    wc = writeCell

    def findAllTimingSections(self):
        """
        We try to have an ordered list of Timing Sequences. As each Testcase might have different sections we'll have
        to make guesses
        @return:
        """
        lSections = {}
        for key, line in self.testdataRecords.items():
            lTiming: dict = ExportTiming.interpretTimeLog(line[GC.TIMELOG])
            for key in lTiming.keys():
                if lSections.get(key):
                    continue
                else:
                    lSections[key] = None
        self.sections = lSections

    @staticmethod
    def interpretTimeLog(lTimeLog):
        """Example Time Log:
        Complete Testrun: Start: 1579553837.241974 - no end recorded
        TestCaseSequenceMaster: Start: 1579553837.243414 - no end recorded
        CustTestCaseMaster: Start: 1579553838.97329 - no end recorded
        Browser Start: , since last call: 2.3161418437957764
        Empfehlungen: , since last call: 6.440968036651611, ZIDs:[175aeac023237a73], TS:2020-01-20 21:57:46.525577
        Annahme_RABAZ: , since last call: 2.002716064453125e-05, ZIDs:[6be7d0a44e59acf6], TS:2020-01-20 21:58:37.203583
        Antrag drucken: , since last call: 9.075241088867188, ZIDs:[6be7d0a44e59acf6, b27c3875ddcbb4fa], TS:2020-01-20 21:58:38.040137
        Warten auf Senden an Bestand Button: , since last call: 1.3927149772644043
        Senden an Bestand: , since last call: 9.60469913482666, ZIDs:[66b12fa4869cf8a0, ad1f3d47c4694e26], TS:2020-01-20 21:58:49.472288
        where the first part before ":" is the section, "since last call:" is the duration, TS: is the timestamp
        Update 29.3.2020: Format changed to "since last call: 00:xx:xx,", rest looks identical.
        """
        lExport = {}
        lLines = lTimeLog.split("\n")
        for line in lLines:
            parts = line.split(",")
            if len(parts) < 2:
                continue
            if "Start:" in line:
                # Format <sequence>: <Start>: <time.loctime>
                continue
            else:
                lSection = parts[0].replace(":", "").strip()
                lDuration = parts[1].split("since last call: ")[1]
                lExport[lSection] = {GC.TIMING_DURATION: lDuration}
        return lExport
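

# ---------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): ExportResults is driven
# entirely by keyword arguments keyed with GlobalConstants entries. Assuming `lTestRun`
# is a finished test run instance and `lAttrs` its testrun attributes dictionary (both
# hypothetical names), a call could look like:
#
#     ExportResults(**{GC.KWARGS_TESTRUNINSTANCE: lTestRun,
#                      GC.KWARGS_TESTRUNATTRIBUTES: lAttrs})
#
# Export format (xlsx/csv), output file name and statistics handling are then derived
# from lAttrs and the test run's global settings as implemented above.
# ---------------------------------------------------------------------------------------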