Browse Source

Merge branch 'results_browser' of https://gogs.earthsquad.global/athos/baangt into results_browser

aguryev 3 years ago
parent
commit
a75f87b64e

+ 1 - 1
.bumpversion.cfg

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 1.1.15
+current_version = 1.1.20
 commit = True
 tag = True
 

+ 8 - 0
baangt/TestCaseSequence/TestCaseSequenceMaster.py

@@ -57,6 +57,14 @@ class TestCaseSequenceMaster:
                 recordPointer -= 1
                 break
             recordPointer += 1
+        try:
+            self.testdataDataBase.update_datarecords(self.dataRecords, fileName=utils.findFileAndPathFromPath(
+                self.testSequenceData[GC.DATABASE_FILENAME],
+                basePath=str(Path(self.testRunInstance.globalSettingsFileNameAndPath).parent)))
+        except Exception as e:
+            logger.error(f"Error during update_datarecords: {e}, Terminating.")
+            import sys
+            sys.exit(f"Error during update_datarecords: {e}")
         logger.info(f"{recordPointer + 1} test records read for processing")
         self.statistics.total_testcases(recordPointer + 1)
 

+ 120 - 5
baangt/TestDataGenerator/TestDataGenerator.py

@@ -10,10 +10,40 @@ import faker
 from random import sample, randint
 import baangt.base.GlobalConstants as GC
 import re
+from openpyxl import load_workbook
 
 logger = logging.getLogger("pyC")
 
 
+class Writer:
+    """
+    This class is made to update an existing Excel file.
+    First it opens the file in Python; then we can perform multiple writes, and once everything is updated we can use
+    the save method to save the updated Excel file. Hence, this class is very useful in saving time while updating
+    Excel files.
+    """
+    def __init__(self, path):
+        self.path = path
+        self.workbook = load_workbook(self.path)
+
+    def write(self, row, data, sht):
+        # Update the values using row and col number.
+        # Note :- We are using openpyxl so row & column index will start from 1 instead of 0
+        column = 0
+        sheet = self.workbook[sht]
+        headers = next(sheet.rows)
+        for header in headers:  # checks if usecount header is present in sheet
+            if "usecount" in str(header.value).lower():
+                column = headers.index(header) + 1
+        if column:
+            sheet.cell(row, column).value = data
+
+
+    def save(self):
+        # Call this method to save the file once every updates are written
+        self.workbook.save(self.path)
+        self.workbook.close()
+
 class TestDataGenerator:
     """
     TestDataGenerator Class is to used to create a TestData file from raw excel file containing all possible values.
@@ -39,9 +69,13 @@ class TestDataGenerator:
             raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), self.path)
         self.sheet_dict, self.raw_data_json = self.__read_excel(self.path, self.sheet_name)
         self.remove_header = []
+        self.usecount_dict = {}  # used to maintain usecount limit records and verify that none of the data crosses its limit
         self.processed_datas = self.__process_data(self.raw_data_json)
         self.headers = [x for x in list(self.processed_datas[0].keys()) if x not in self.remove_header]
+        self.headers = [x for x in self.headers if 'usecount' not in x.lower()]
+        self.writer = Writer(self.path)  # Writer class object used to save the file only at the end, which saves time
         self.final_data = self.__generateFinalData(self.processed_datas)
+        self.writer.save()  # saving source input file once everything is done
 
     def write(self, OutputFormat=GC.TESTDATAGENERATOR_OUTPUT_FORMAT, batch_size=0, outputfile=None):
         """
@@ -203,6 +237,7 @@ class TestDataGenerator:
         :return: None
         """
         for data in data_list:
+            success = True  # if usecount data is present, this tracks whether the whole row may still be added to the final output
             data = list(data)
             done = {}
             for ind in dictionary:
@@ -218,9 +253,28 @@ class TestDataGenerator:
                                 sorted_data = [x for x in dictionary[ind] if x[header] == match]
                                 break
                         if not sorted_data:
-                            sorted_data = dictionary[ind]
-                        data_to_insert = sorted_data[randint(0, len(sorted_data) - 1)]
+                            sorted_data = list(dictionary[ind])
+                        remove_data = []  # Used to remove data with reached limit of usecount
+                        for dtt in sorted_data:  # this loop will check if data has reached the usecount limit and remove
+                            if self.usecount_dict[repr(dtt)]['limit'] == 0:
+                                continue
+                            if not self.usecount_dict[repr(dtt)]['use'] < self.usecount_dict[repr(dtt)]['limit']:
+                                remove_data.append(dtt)
+                        for dtt in remove_data:  # removing data from main data list
+                            logger.debug(f"UseCount limit of {dtt} is exceeded : {str(self.usecount_dict[repr(dtt)]['limit'])}")
+                            sorted_data.remove(dtt)
+                        if len(sorted_data) == 0:  # if the current loop has reached the usecount limit, we need not add the whole row to the final output
+                            success = False
+                            break
+                        elif len(sorted_data) == 1:
+                            data_to_insert = sorted_data[0]
+                        else:
+                            data_to_insert = sorted_data[randint(0, len(sorted_data) - 1)]
+                        self.usecount_dict[repr(data_to_insert)]['use'] += 1
                         for keys in data_to_insert:
+                            if "usecount" in keys.lower():  # removing usecount header from headers in final output
+                                self.update_usecount_in_source(data_to_insert)
+                                continue
                             if keys not in self.headers:
                                 self.headers.append(keys)
                             if keys not in done:
@@ -229,7 +283,8 @@ class TestDataGenerator:
                     else:
                         data_to_insert = dictionary[ind][randint(0, len(dictionary[ind]) - 1)]
                         data.insert(ind, data_to_insert)
-            final_list.append(data)
+            if success:
+                final_list.append(data)
 
     def __prefix_data_processing(self, dic, key, dictionary: dict):
         """
@@ -388,6 +443,9 @@ class TestDataGenerator:
             processed_datas = self.__processRrd(first_value, second_value, evaluated_dict, sheet_dict)
             processed_datas = data_type(processed_datas)
 
+        elif prefix == "Renv":
+            processed_datas = self.get_env_variable(raw_data)
+
         elif "-" in raw_data:
             raw_data = raw_data.split('-')
             start = raw_data[0].strip()
@@ -450,6 +508,9 @@ class TestDataGenerator:
                 else:
                     data_type = list
             else:
+                if raw_string[:5].lower() == "renv_":
+                    prefix = "Renv"
+                    raw_string = raw_string[5:]
                 data_type = list
         else:
             data_type = list
@@ -530,20 +591,61 @@ class TestDataGenerator:
         if type(data_looking_for) == str:
             data_looking_for = data_looking_for.split(",")
 
+        usecount, limit, usecount_header = self.check_usecount(base_sheet[0])
         for data in base_sheet:
+            if usecount_header:
+                used_limit = data[usecount_header]
+            else:
+                used_limit = 0
             if len(matching_data) == 1 and len(matching_data[0]) == 0:
                 if data_looking_for[0] == "*":
                     data_lis.append(data)
+                    self.usecount_dict[repr(data)] = {
+                        "use": used_limit, "limit": limit, "index": base_sheet.index(data) + 2, "sheet_name": sheet_name
+                    }
                 else:
-                    data_lis.append({keys: data[keys] for keys in data_looking_for})
+                    dt = {keys: data[keys] for keys in data_looking_for}
+                    data_lis.append(dt)
+                    self.usecount_dict[repr(dt)] = {
+                        "use" : used_limit, "limit" : limit, "index": base_sheet.index(data) + 2, "sheet_name": sheet_name
+                    }
             else:
                 if [data[key] for key in data_to_match] in matching_data:
                     if data_looking_for[0] == "*":
                         data_lis.append(data)
+                        self.usecount_dict[repr(data)] = {
+                        "use": used_limit, "limit": limit, "index": base_sheet.index(data) + 2, "sheet_name": sheet_name
+                    }
                     else:
-                        data_lis.append({keys: data[keys] for keys in data_looking_for})
+                        dt = {keys: data[keys] for keys in data_looking_for}
+                        data_lis.append(dt)
+                        self.usecount_dict[repr(dt)] = {
+                        "use" : used_limit, "limit" : limit, "index": base_sheet.index(data) + 2, "sheet_name": sheet_name
+                    }
         return data_lis
 
+    def check_usecount(self, data):
+        # used to find and return whether there is a usecount header and limit in the input file
+        usecount = False
+        limit = 0
+        usecount_header = None
+        for header in data:
+            if "usecount" in header.lower():
+                usecount = True
+                usecount_header = header
+                if "usecount_" in header.lower():
+                    try:
+                        limit = int(header.lower().strip().split("count_")[1])
+                    except:
+                        limit = 0
+        return usecount, limit, usecount_header
+
+    def update_usecount_in_source(self, data):
+        self.writer.write(
+            self.usecount_dict[repr(data)]["index"], self.usecount_dict[repr(data)]["use"],
+            self.usecount_dict[repr(data)]["sheet_name"]
+        )
+
     def __process_rrd_string(self, rrd_string):
         """
         This method is used to validate rrd_strings provided by the user.
@@ -594,6 +696,19 @@ class TestDataGenerator:
         assert match, err_string
         return processed_string
 
+    @staticmethod
+    def get_env_variable(string):
+        variable = string[1:-1].strip().split(',')[0].strip()
+        data = os.environ.get(variable)
+        try:
+            if not data:
+                data = string[1:-1].strip().split(',')[1].strip()
+                logger.info(f"{variable} not found in environment, using {data} instead")
+        except:
+            raise BaseException(f"Can't find {variable} in envrionment & default value is also not set")
+        return data
+
+
 if __name__ == "__main__":
     lTestDataGenerator = TestDataGenerator("../../tests/0TestInput/RawTestData.xlsx")
     lTestDataGenerator.write()

+ 4 - 3
baangt/TestSteps/TestStepMaster.py

@@ -586,9 +586,10 @@ class TestStepMaster:
                     centerValue = ""
                 else:
                     raise BaseException(f"Variable not found: {center}, input parameter was: {expression}")
-
-            expression = "".join([left_part, str(centerValue), right_part])
-
+            if not isinstance(centerValue, list) and not isinstance(centerValue, dict):
+                expression = "".join([left_part, str(centerValue), right_part])
+            else:
+                expression = centerValue
         return expression
 
     def iterate_json(self, data, key):

+ 17 - 15
baangt/base/BrowserHandling/BrowserHandling.py

@@ -356,27 +356,29 @@ class BrowserDriver:
         returnValue = None
         start = time.time()
         duration = 0
+        retry = True
 
-        while not self.element and duration < timeout:
+        while retry and duration < timeout:
             self.element, self.html = self.findBy(id=id, css=css, xpath=xpath, class_name=class_name, iframe=iframe, timeout=timeout / 3,
                             optional=optional)
             time.sleep(0.5)
             duration = time.time() - start
 
-        if self.element:
-            try:
-                if len(self.element.text) > 0:
-                    returnValue = self.element.text
-                elif self.element.tag_name == 'input':
-                    #  element is of type <input />
-                    returnValue = self.element.get_property('value')
-                else:
-                    returnValue = None
-            except Exception as e:
-                logger.debug(f"Exception during findByAndWaitForValue, but continuing {str(e)}, "
-                             f"Locator: {self.browserData.locatorType} = {self.browserData.locator}")
-        else:
-            logger.info(f"Couldn't find value for element {self.browserData.locatorType}:{self.browserData.locator}")
+            if self.element:
+                try:
+                    if len(self.element.text) > 0:
+                        returnValue = self.element.text.strip()
+                    elif self.element.tag_name == 'input':
+                        #  element is of type <input />
+                        returnValue = self.element.get_property('value').strip()
+                except Exception as e:
+                    logger.debug(f"Exception during findByAndWaitForValue, but continuing {str(e)}, "
+                                 f"Locator: {self.browserData.locatorType} = {self.browserData.locator}")
+            else:
+                logger.info(f"Couldn't find value for element {self.browserData.locatorType}:{self.browserData.locator}")
+
+            if returnValue and len(returnValue.strip()) > 0:
+                return returnValue
 
         return returnValue
 

+ 7 - 3
baangt/base/ExportResults/Append2BaseXLS.py

@@ -34,12 +34,16 @@ class Append2BaseXLS:
             fileTuples.append(self.checkAppend(lGlobals["AR2BXLS"]))
 
         for fileTuple in fileTuples:
-            logger.debug(f"Starting to append results to: {str(fileTuple)}")
+            if not fileTuple:
+                logger.critical("File to append results to not found (see message above")
+                break
+            logger.info(f"Starting to append results to: {str(fileTuple)}")
             lMover = Mover(source_file_path=self.resultsFileName,
                            source_sheet="Output",
                            destination_file_path=fileTuple[0],
-                           destination_sheet=fileTuple[1])
-            lMover.move(add_missing_columns=False)
+                           destination_sheet=fileTuple[1].strip())
+            lMover.move(filters={GC.TESTCASESTATUS:GC.TESTCASESTATUS_SUCCESS}, add_missing_columns=False)
+            logger.debug(f"Appending results to {str(fileTuple)} finished")
 
     def checkAppend(self, file):
         lFileAndPath = self.mp.findFileInAnyPath(filename=file.split(",")[0])

+ 27 - 5
baangt/base/ExportResults/ExportResults.py

@@ -23,6 +23,7 @@ from uuid import uuid4
 from pathlib import Path
 from baangt.base.ExportResults.SendStatistics import Statistics
 from baangt.base.RuntimeStatistics import Statistic
+from openpyxl import load_workbook
 
 logger = logging.getLogger("pyC")
 
@@ -120,6 +121,7 @@ class ExportResults:
                 self.statistics.send_statistics()
             except Exception as ex:
                 logger.debug(ex)
+        self.update_result_in_testrun()
 
     def __removeUnwantedFields(self):
         lListPasswordFieldNames = ["PASSWORD", "PASSWORT", "PASSW"]
@@ -134,15 +136,13 @@ class ExportResults:
             for key, fields in self.dataRecords.items():
                 fieldsToPop = []
                 for field, value in fields.items():
-                    if field.upper() in ["PASSWORD", "PASSWORT"]:
+                    if field.upper() in lListPasswordFieldNames:
                         self.dataRecords[key][field] = "*" * 8
                     if field in self.testRunInstance.globalSettings.keys():
-                        logger.debug(
-                            f"Added {field} to fields to be removed from data record as it exists in GlobalSettings already.")
                         fieldsToPop.append(field)
                 for field in fieldsToPop:
-                    logger.debug(f"Removed field {field} from data record.")
-                    fields.pop(field)
+                    if field != 'Screenshots' and field != 'Stage':   # Stage and Screenshot are needed in output file
+                        fields.pop(field)
 
     def exportAdditionalData(self):
         # Runs only, when KWARGS-Parameter is set.
@@ -464,6 +464,7 @@ class ExportResults:
         # Timing
         timing: Timing = self.testRunInstance.timing
         start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
+        self.testRun_end = end  # used while updating timestamp in source file
         self.statistics.update_attribute_with_value("Duration", duration)
         self.statistics.update_attribute_with_value("TestRunUUID", str(self.testRunInstance.uuid))
         self.__writeSummaryCell("Starttime", start, row=10)
@@ -477,6 +478,8 @@ class ExportResults:
         # Globals:
         self.__writeSummaryCell("Global settings for this testrun", "", format=self.cellFormatBold, row=15)
         for key, value in self.testRunInstance.globalSettings.items():
+            if key.upper() in ["PASSWORD", "PASSWORT", "CONFLUENCE-PASSWORD"]:
+                continue
             self.__writeSummaryCell(key, str(value))
             # get global data my
             self.testList.append(str(value))
@@ -587,6 +590,25 @@ class ExportResults:
         for n in range(0, len(self.fieldListExport)):
             ExcelSheetHelperFunctions.set_column_autowidth(self.worksheet, n)
 
+    def update_result_in_testrun(self):
+        # To update source testrun file
+        testrun_column = self.dataRecords[0]["testcase_column"]
+        if testrun_column:  # if testrun_column is greater than 0 that means testresult header is present in source file
+            testrun_file = load_workbook(self.dataRecords[0]["testcase_file"])
+            testrun_sheet = testrun_file.get_sheet_by_name(self.dataRecords[0]["testcase_sheet"])
+            for key, value in self.dataRecords.items():
+                print(value)
+                data = f"TestCaseStatus: {value['TestCaseStatus']}\r\n" \
+                       f"Timestamp: {self.testRun_end}\r\n" \
+                       f"Duration: {value['Duration']}\r\n" \
+                       f"TCErrorLog: {value['TCErrorLog']}\r\n" \
+                       f"TestRun_UUID: {str(self.testRunInstance.uuid)}\r\n" \
+                       f"TestCase_UUID: {str(self.testcase_uuids[key])}\r\n\r\n"
+                old_value = testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value or ""
+                testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value = data + old_value
+            testrun_file.save(self.dataRecords[0]["testcase_file"])
+            logger.info(f"Source TestRun file {self.dataRecords[0]['testcase_file']} updated.")
+
     def __writeCell(self, line, cellNumber, testRecordDict, fieldName, strip=False):
         if fieldName in testRecordDict.keys() and testRecordDict[fieldName]:
             # Convert boolean for Output

+ 111 - 14
baangt/base/HandleDatabase.py

@@ -10,10 +10,41 @@ from pathlib import Path
 import xl2dict
 import re
 from random import randint
+from openpyxl import load_workbook
+from baangt.TestDataGenerator.TestDataGenerator import TestDataGenerator
 
 logger = logging.getLogger("pyC")
 
 
+class Writer:
+    """
+    This class is made to update existing excel file.
+    First it will open the file in python and then we can do multiple writes and once everything is update we can use
+    save method in order to save the updated excel file. Hence, this class is very useful is saving time while updating
+    excel files.
+    """
+    def __init__(self, path):
+        self.path = path
+        self.workbook = load_workbook(self.path)
+
+    def write(self, row, data, sht):
+        # Update the values using row and col number.
+        # Note :- We are using openpyxl so row & column index will start from 1 instead of 0
+        column = 0
+        sheet = self.workbook[sht]
+        headers = next(sheet.rows)
+        for header in headers:  # checks if usecount header is present in sheet
+            if "usecount" in str(header.value).lower():
+                column = headers.index(header) + 1
+        if column:
+            sheet.cell(row, column).value = data
+
+    def save(self):
+        # Call this method to save the file once every updates are written
+        self.workbook.save(self.path)
+        self.workbook.close()
+
+
 class HandleDatabase:
     def __init__(self, linesToRead, globalSettings=None):
         self.lineNumber = 3
@@ -113,6 +144,13 @@ class HandleDatabase:
         # read header values into the list
         keys = [sheet.cell(0, col_index).value for col_index in range(sheet.ncols)]
 
+        # if testresult header is present then taking its index, which is later used as column number
+        testrun_index = [keys.index(x) for x in keys if str(x).lower() == "testresult"]
+        if testrun_index:
+            testrun_index = testrun_index[0] + 1  # adding +1 value which is the correct column position
+        else:  # if the list is empty, that means there is no testresult header
+            testrun_index = 0
+
         for row_index in range(1, sheet.nrows):
             temp_dic = {}
             for col_index in range(sheet.ncols):
@@ -121,10 +159,18 @@ class HandleDatabase:
                     temp_dic[keys[col_index]] = repr(temp_dic[keys[col_index]])
                     if temp_dic[keys[col_index]][-2:] == ".0":
                         temp_dic[keys[col_index]] = temp_dic[keys[col_index]][:-2]
-
+            # row, column, sheetName & fileName which are later used in updating source testrun file
+            temp_dic["testcase_row"] = row_index
+            temp_dic["testcase_sheet"] = sheetName
+            temp_dic["testcase_file"] = fileName
+            temp_dic["testcase_column"] = testrun_index
             self.dataDict.append(temp_dic)
+        self.usecount_dict = {}  # used to maintain usecount limit records and verify that none of the data crosses its limit
+        self.writer = Writer(fileName)  # Writer class object used to save the file only at the end, which saves time
 
-        for temp_dic in self.dataDict:
+    def update_datarecords(self, dataDict, fileName):
+        for td in dataDict:
+            temp_dic = dataDict[td]
             new_data_dic = {}
             for keys in temp_dic:
                 if type(temp_dic[keys]) != str:
@@ -150,6 +196,8 @@ class HandleDatabase:
                         new_data_dic[data] = rre_data[data]
                 elif str(temp_dic[keys][:4]) == "RLP_":
                     temp_dic[keys] = self.rlp_process(temp_dic[keys], fileName)
+                elif str(temp_dic[keys][:5]).upper() == "RENV_":
+                    temp_dic[keys] = str(TestDataGenerator.get_env_variable(temp_dic[keys][5:]))
                 else:
                     try:
                         js = json.loads(temp_dic[keys])
@@ -158,6 +206,7 @@ class HandleDatabase:
                         pass
             for key in new_data_dic:
                 temp_dic[key] = new_data_dic[key]
+        self.writer.save()  # saving source input file once everything is done
 
     def rlp_process(self, string, fileName):
         # Will get real data from rlp_ prefix string
@@ -202,7 +251,8 @@ class HandleDatabase:
                     lAppend = False
         return lAppend
 
-    def __processRrd(self, sheet_name, data_looking_for, data_to_match: dict, sheet_dict=None, caller="RRD_"):
+    def __processRrd(self, sheet_name, data_looking_for, data_to_match: dict, sheet_dict=None, caller="RRD_",
+                     file_name=None):
         """
         For more detail please refer to TestDataGenerator.py
         :param sheet_name:
@@ -213,26 +263,65 @@ class HandleDatabase:
         sheet_dict = self.sheet_dict if sheet_dict is None else sheet_dict
         matching_data = [list(x) for x in itertools.product(*[data_to_match[key] for key in data_to_match])]
         assert sheet_name in sheet_dict, \
-            f"Excel file doesn't contain {sheet_name} sheet. Please recheck. Called in '{caller}'"
+            f"Excel file {file_name} doesn't contain {sheet_name} sheet. Please recheck. Called in '{caller}'"
         base_sheet = sheet_dict[sheet_name]
         data_lis = []
         if type(data_looking_for) == str:
             data_looking_for = data_looking_for.split(",")
 
+        usecount, limit, usecount_header = self.check_usecount(base_sheet[0])
+
         for data in base_sheet:
+            dt = ""
             if len(matching_data) == 1 and len(matching_data[0]) == 0:
                 if data_looking_for[0] == "*":
-                    data_lis.append(data)
+                    dt = data
+                    data_lis.append(dt)
                 else:
-                    data_lis.append({keys: data[keys] for keys in data_looking_for})
+                    dt = {keys: data[keys] for keys in data_looking_for}
+                    data_lis.append(dt)
             else:
                 if [data[key] for key in data_to_match] in matching_data:
                     if data_looking_for[0] == "*":
-                        data_lis.append(data)
+                        dt = data
+                        data_lis.append(dt)
+                    else:
+                        dt = {keys: data[keys] for keys in data_looking_for}
+                        data_lis.append(dt)
+            if dt:
+                if repr(dt) not in self.usecount_dict:
+                    if usecount_header:
+                        if data[usecount_header]:
+                            used_limit = int(data[usecount_header])
+                        else:
+                            used_limit = 0
                     else:
-                        data_lis.append({keys: data[keys] for keys in data_looking_for})
+                        used_limit = 0
+                    self.usecount_dict[repr(dt)] = {
+                        "use": used_limit, "limit": limit, "index": base_sheet.index(data) + 2, "sheet_name": sheet_name
+                    }
+                else:
+                    if limit:
+                        if not self.usecount_dict[repr(dt)]["use"] < self.usecount_dict[repr(dt)]["limit"]:
+                            data_lis.remove(dt)
         return data_lis
 
+    def check_usecount(self, data):
+        # used to find and return whether there is a usecount header and limit in the input file
+        usecount = False
+        limit = 0
+        usecount_header = None
+        for header in data:
+            if "usecount" in header.lower():
+                usecount = True
+                usecount_header = header
+                if "usecount_" in header.lower():
+                    try:
+                        limit = int(header.lower().strip().split("count_")[1])
+                    except:
+                        limit = 0
+        return usecount, limit, usecount_header
+
     def __rrd_string_to_python(self, raw_data, fileName):
         """
         Convert string to python data types
@@ -259,11 +348,18 @@ class HandleDatabase:
             second_value = self.__splitList(second_value)
         if first_value not in self.sheet_dict:
             self.sheet_dict, _ = self.__read_excel(path=fileName)
-        processed_datas = self.__processRrd(first_value, second_value, evaluated_dict)
-        assert len(processed_datas)>0, f"No matching data for RRD_. Please check the input file. Was searching for " \
+        processed_datas = self.__processRrd(first_value, second_value, evaluated_dict, file_name=fileName)
+        assert len(processed_datas)>0, f"No matching data for RRD_. Please check the input file {fileName}. Was searching for " \
                                        f"{first_value}, {second_value} and {str(evaluated_dict)} " \
-                                       f"but didn't find anything"
-        return processed_datas[randint(0, len(processed_datas)-1)]
+                                       f"but didn't find anything. Also please check the usecount limit if their is any."
+        final_data = processed_datas[randint(0, len(processed_datas)-1)]
+        if repr(final_data) in self.usecount_dict:
+            self.usecount_dict[repr(final_data)]["use"] += 1
+            self.writer.write(
+                self.usecount_dict[repr(final_data)]["index"], self.usecount_dict[repr(final_data)]["use"],
+                self.usecount_dict[repr(final_data)]["sheet_name"]
+            )
+        return final_data
 
     def __rre_string_to_python(self, raw_data):
         """
@@ -295,7 +391,8 @@ class HandleDatabase:
         assert len(processed_datas)>0, f"No matching data for RRD_. Please check the input file. Was searching for " \
                                        f"{first_value}, {second_value} and {str(evaluated_dict)} " \
                                        f"but didn't find anything"
-        return processed_datas[randint(0, len(processed_datas)-1)]
+        final_data = randint(0, len(processed_datas)-1)
+        return processed_datas[final_data]
 
     def __rlp_string_to_python(self, raw_data, fileName):
         # will convert rlp string to python
@@ -386,7 +483,7 @@ class HandleDatabase:
         if sheet_name == "":
             base_sheet = sheet_dict[sheet_lis[0]]
         else:
-            assert sheet_name in sheet_dict, f"Excel file doesn't contain {sheet_name} sheet. Please recheck."
+            assert sheet_name in sheet_dict, f"Excel file {path} doesn't contain {sheet_name} sheet. Please recheck."
             base_sheet = sheet_dict[sheet_name]
         self.sheet_dict = sheet_dict
         self.base_sheet = base_sheet

+ 6 - 1
baangt/base/TestRun/TestRun.py

@@ -26,6 +26,7 @@ from uuid import uuid4
 from baangt.base.RuntimeStatistics import Statistic
 from baangt.base.SendReports import Sender
 import signal
+from baangt.TestDataGenerator.TestDataGenerator import TestDataGenerator
 
 logger = logging.getLogger("pyC")
 
@@ -322,13 +323,17 @@ class TestRun:
             if isinstance(value, str):
                 if value.lower() in ("false", "true", "no", "x"):
                     self.globalSettings[key] = utils.anything2Boolean(value)
-
+                elif "renv_" in value.lower():
+                    self.globalSettings[key] = TestDataGenerator.get_env_variable(value[5:])
             if isinstance(value, dict):
                 if "default" in value:
                     # This happens in the new UI, if a value was added manually,
                     # but is not part of the globalSetting.json. In this case there's the whole shebang in a dict. We
                     # are only interested in the actual value, which is stored in "default":
                     self.globalSettings[key] = value["default"]
+                    if isinstance(self.globalSettings[key], str):
+                        if "renv_" in self.globalSettings[key].lower():
+                            self.globalSettings[key] = TestDataGenerator.get_env_variable(self.globalSettings[key][5:])
                     continue
                 else:
                     # This could be the "old" way of the globals-file (with {"HEADLESS":"True"})

+ 40 - 0
baangt/ui/pyqt/uimain.py

@@ -93,6 +93,7 @@ class MainWindow(Ui_MainWindow):
         self.__log_file = ""
         self.__open_files = 0
         self.TDGResult = ""
+        self.dataFile = ""
 
         # self.refreshNew()
         # self.setupBasePath(self.directory)
@@ -544,6 +545,39 @@ class MainWindow(Ui_MainWindow):
             self.run_process.waitForFinished(3000)
             self.run_process.kill()
 
+    def update_datafile(self):
+        from baangt.base.TestRun.TestRun import TestRun
+        testRunFile = f"{Path(self.directory).joinpath(self.testRunFile)}"
+        globalsFile = self.configFile
+        uu = uuid4()
+        tr = TestRun(testRunFile, globalsFile, uuid=uu, executeDirect=False)
+        tr._initTestRunSettingsFromFile()
+        if "TC.TestDataFileName" in tr.globalSettings:
+            self.dataFile = tr.globalSettings["TC.TestDataFileName"]
+        else:
+            tr._loadJSONTestRunDefinitions()
+            tr._loadExcelTestRunDefinitions()
+            self.dataFile = self.findKeyFromDict(tr.testRunUtils.testRunAttributes, "TestDataFileName")
+
+    def findKeyFromDict(self, dic, key):
+        if isinstance(dic, list):
+            for data in dic:
+                if isinstance(dic, list) or isinstance(dic, dict):
+                    result = self.findKeyFromDict(data, key)
+                    if result:
+                        return result
+        elif isinstance(dic, dict):
+            for k in dic:
+                if k == key:
+                    return dic[k]
+                elif isinstance(dic[k], list) or isinstance(dic[k], dict):
+                    result = self.findKeyFromDict(dic[k], key)
+                    if result:
+                        return result
+        return ""
+
+
+
     def signalCtrl(self, qProcess, ctrlEvent=None):
         import win32console, win32process, win32api, win32con
         if ctrlEvent is None:
@@ -1256,6 +1290,12 @@ class MainWindow(Ui_MainWindow):
             fileName = os.path.basename(filePathName)
             self.statusMessage(f"Opening file {fileName}", 3000)
             FilesOpen.openResultFile(filePathName)
+            self.update_datafile()
+            if self.dataFile:
+                self.statusMessage(f"Opening file {self.dataFile}", 3000)
+                PathName = f"{Path(self.directory).joinpath(self.dataFile)}"
+                Name = os.path.basename(PathName)
+                FilesOpen.openResultFile(PathName)
         except:
             self.statusMessage("No file found!", 3000)
 

BIN
docs/DataGeneratorInput.png


+ 16 - 4
docs/Datagenerator.rst

@@ -22,6 +22,7 @@ This image is an example input file. Different types of data types supported are
   8. ``FKR_`` prefix is used here with a new integer value 0 in end.
   9. ``RRD_`` prefix is used here.
   10. ``RRE_`` prefix is used here.
+  11. ``RENV_`` prefix is used here.
 
 Using these data type we will generate all possible values.
 Here is a simple example with simple value and value of list.
@@ -100,9 +101,19 @@ We will use the reference of above image and assigned number to learn about it i
      i.e. First ``RRD_`` cell has value "x" for the header while selected randomly, then the second cell will select data
      randomly only from the rows which have "x" value for the same header.
   10. ``RRE_`` is same as ``RRD_`` only change is that in rrd we take data from same file and different sheet but, in
-     this ``RRE_`` prefix we can take data from another file. The only change in its structure is that filename comes
-     before sheetname.
-     i.e. ``RRE_[fileName,sheetName,Targetdata,[key:value]]``
+      this ``RRE_`` prefix we can take data from another file. The only change in its structure is that filename comes
+      before sheetname.
+      i.e. ``RRE_[fileName,sheetName,Targetdata,[key:value]]``
+  11. ``RENV_`` prefix is used to get an environment variable from your system. There might be times when you don't
+      want to input sensitive data like password, username, etc. directly inside the TestRun file or Globals file; at
+      such times this prefix will be very useful, as it will get the data from your system environment.
+      Its structure is ``RENV_(<env_variable>,<default>)`` here "<env_variable>" holds the place of variable name which
+      contains data and "<default>" holds the default value which is used in case given variable is not present in
+      environment. If "<default>" value is not supplied and given variable is also not present in environment then
+      it will raise an error.
+      e.g. ``RENV_(USERNAME, My_Pc)``
+      Here it will first look for the "USERNAME" variable in the environment; if it is not present, it will use "My_Pc".
+
 
 
 All Data Types Format
@@ -116,4 +127,5 @@ All Data Types Format
 6. List of header    = ``[<title1>, <title2>, <title3>]``
 7. Faker Prefix      = ``FKR_(<type>, <locale>, <number_of_data>)``
 8. RRD Prefix        = ``RRD_(<sheetName>,<TargetData>,[<Header1>:[<Value1>],<Header2>:[<Value1>,<Value2>]])``
-9. RRE Prefix        = ``RRE_(<fileName>,<sheetName>,<TargetData>,[<Header1>:[<Value1>],<Header2>:[<Value1>,<Value2>]])``
+9. RRE Prefix        = ``RRE_(<fileName>,<sheetName>,<TargetData>,[<Header1>:[<Value1>],<Header2>:[<Value1>,<Value2>]])``
+10. RENV Prefix      = ``RENV_(<env_variable>,<default>)``

+ 3 - 4
docs/PlannedFeatures.rst

@@ -5,10 +5,10 @@ We implement all features for 3 operating Systems (Mac, Windows, Ubuntu and Ubun
 Short/Medium term features
 ---------------------------
 * Improve template for TestCaseDefinitions
-* DataFiles and TestDataGenerator: Read remote data sources (e.g. other sheets or SQL-Databases)
-* DataFiles: Nested data structures per line item (e.g. Sales order header --> Sales order item)
 * Better support for scraping
-* ELSE-Activity and nested IF/ELSE/ENDIF-Activities
+* Improved reporting on test runs/Test cases, etc.
+* Improved handling of test data base entities (e.g. use-counters for each object)
+
 
 Features for later
 ------------------
@@ -19,7 +19,6 @@ Features for later
 * Improved support for Mass testing APIs
 * Katalon Importer/Converter as Webservice
 * Integration with Atlassian Confluence (for Testcase and Testrun definitions)
-* Integration with Atlassian Confluence (to publish results of testruns)
 * Better support for oData V4.0 (similar to SOAP)
 * Support for GraphQL via Graphene
 * Multi-Language interface (I18n)

+ 14 - 1
docs/changelog.rst

@@ -1,7 +1,20 @@
 Change log
 ==========
 
-1.1.4
+1.1.15
+^^^^^^^
+
+Summary:
+++++++++
+
+* Export (all channels): Passwords are replaced by ``*******``
+* ELSE-Activity and nested IF/ELSE/ENDIF-Activities
+* DataFiles: Nested data structures per line item (e.g. Sales order header --> Sales order item)
+* DataFiles and TestDataGenerator: Read remote data sources (e.g. other sheets or SQL-Databases)
+* Integration with Atlassian Confluence to export test run results into Confluence WIKI-Pages (and Sub-pages!)
+* Export results to multiple Excel-Sheets (e.g. when collecting reusable master data like customer master records)
+
+1.1.5
 ^^^^^^^
 This is the version, that was released as first publicly downloadable version.
 

+ 1 - 1
docs/conf.py

@@ -24,7 +24,7 @@ copyright = '2020, Bernhard Buhl'
 author = 'Bernhard Buhl'
 
 # The full version, including alpha/beta/rc tags
-release = '1.1.15'
+release = '1.1.20'
 
 
 # -- General configuration ---------------------------------------------------

BIN
examples/CompleteBaangtWebdemo_result_update.xlsx


BIN
examples/CompleteBaangtWebdemo_usecount.xlsx


+ 2 - 2
setup.py

@@ -6,7 +6,7 @@ if __name__ == '__main__':
 
     setuptools.setup(
         name="baangt",
-        version="1.1.15",
+        version="1.1.20",
         author="Bernhard Buhl",
         author_email="info@baangt.org",
         description="Open source Test Automation Suite for MacOS, Windows, Linux",
@@ -25,7 +25,7 @@ if __name__ == '__main__':
                           "schwifty", "selenium", "sqlalchemy",
                           "urllib3", "psutil", "pymsteams", "slack-webhook",
                           "xl2dict", "xlrd3", "xlsxwriter", "atlassian-python-api",
-                          "baangt-MvCrrspXLS"],
+                          "icopy2xls"],
         classifiers=[
             "Programming Language :: Python :: 3",
             "License :: OSI Approved :: MIT License",

BIN
tests/0TestInput/RawTestData.xlsx


+ 0 - 1
tests/0TestInput/ServiceTestInput/globalsNoBrowser.json

@@ -9,7 +9,6 @@
     "TC.BrowserWindowSize": "1024x768",
     "TC.LogLevel": "Debug",
     "Stage": "Test",
-    "SendMailTo": "randomthings2206@gmail.com, buhl@buhl-consulting.com.cy",
     "MsWebHook": "",
     "SlackWebHook": "",
     "TelegramBot": "",

+ 0 - 13
tests/test_ServiceTest.py

@@ -108,7 +108,6 @@ def test_regular_firefox():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Firefox regular test succeed output file =", new_file[0][0]
 
 
@@ -121,7 +120,6 @@ def test_parellel_firefox():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Firefox parellel test succeed output file =", new_file[0][0]
 
 
@@ -135,7 +133,6 @@ def test_browsermob_proxy_firefox():
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
     check_browsermob_output(output_file)
-    os.remove(output_file)
     return "Firefox Browsermob test succeed output file =", new_file[0][0]
 
 
@@ -148,7 +145,6 @@ def test_headless_firefox():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Firefox headless test succeed output file =", new_file[0][0]
 
 
@@ -160,8 +156,6 @@ def test_csv_firefox():
     new_file = folder_monitor.getNewFiles()
     assert new_file
     assert ".csv" in new_file[0][0]
-    output_file = output_dir.joinpath(new_file[0][0]).as_posix()
-    os.remove(output_file)
     return "Firefox Output Format test succeed output file =", new_file[0][0]
 
 
@@ -176,7 +170,6 @@ def test_regular_chrome():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Chrome regular test succeed output file =", new_file[0][0]
 
 
@@ -190,7 +183,6 @@ def test_parellel_chrome():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Chrome parellel test succeed output file =", new_file[0][0]
 
 
@@ -205,7 +197,6 @@ def test_browsermob_proxy_chrome():
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
     check_browsermob_output(output_file)
-    os.remove(output_file)
     return "Chrome Browsermob test succeed output file =", new_file[0][0]
 
 
@@ -223,7 +214,6 @@ def test_headless_chrome():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
     return "Chrome headless test succeed output file =", new_file[0][0]
 
 
@@ -246,7 +236,6 @@ def test_full_BaangtWebDemo():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
 
 def test_NestedIfElse_with_NoBrowser():
     run_file = str(input_dir.joinpath("CompleteBaangtWebdemo_else.xlsx"))
@@ -255,7 +244,6 @@ def test_NestedIfElse_with_NoBrowser():
     assert new_file
     output_file = output_dir.joinpath(new_file[0][0]).as_posix()
     check_output(output_file)
-    os.remove(output_file)
 
 def test_NestedIfElse_with_greater_endif():
     run_file = str(input_dir.joinpath("CompleteBaangtWebdemo_else_error.xlsx"))
@@ -286,4 +274,3 @@ def test_NestedLoops_and_repeat():
     s = Session()
     data = s.query(TestrunLog).get(uuid.UUID(TestRunUUID).bytes)
     assert "textarea2" in json.loads(data.RLPJson)
-    os.remove(output_file)

+ 10 - 0
tests/test_TestDataGenerator.py

@@ -136,3 +136,13 @@ def test_rre_no_data_to_match():
     assert len(rrd_output_dict) == 10
     for data in rrd_output_dict:
         print(data)
+
+
+def test_renv():
+    data = TestDataGenerator.get_env_variable("(USERNAME, test)")
+    assert data
+
+
+def test_renv_without_default():
+    with pytest.raises(BaseException):
+        TestDataGenerator.get_env_variable("(URNAMEfh)")