Tidal Job Dependencies API Issue

ppkuanr
Level 1

Hi Team,

I've developed a Python script to copy Tidal jobs on demand. While the dependencies are visible in the GUI, I'm unable to retrieve them through the API method. Below is my implementation:

For copying a job:

import base64
import requests

# username/password and get_Job_Detail() are defined elsewhere in the full script
def copy_jobonly(path):
    jobid, parentid = get_Job_Detail(path)
    print("Inside copy_jobonly", path)
    print(jobid, parentid)

    url = "http://xxxx/api/tes-6.5/post"

    # Build the Basic Auth header manually
    credentials = f"{username}:{password}"
    encoded_credentials = base64.b64encode(credentials.encode()).decode()
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Authorization": f"Basic {encoded_credentials}"
    }

    # Atom-wrapped Job.copy request body
    post_command = f"""<?xml version="1.0" encoding="UTF-8" ?>
        <entry xmlns="http://purl.org/atom/ns#">
            <tes:Job.copy xmlns:tes="http://www.tidalsoftware.com/client/tesservlet">
                <id>{jobid}</id>
                <parentid>{parentid}</parentid>
                <prefix>copied_</prefix>
            </tes:Job.copy>
        </entry>"""
    payload = {"data": post_command}

    try:
        response = requests.post(url, data=payload, headers=headers)
        if response.status_code == 200:
            print("Response:", response.text)
            return response.text
        else:
            print(f"Error: {response.status_code}, Message: {response.text}")
            return None
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

For getting the dependencies:

import requests
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
import pandas as pd

def get_Alldependencies():
    url = "http://xxxx/api/tes-6.5/JobDependency.getList"

    try:
        # Make the GET request
        response = requests.get(url, auth=HTTPBasicAuth(username, password))
        response.raise_for_status()

        # Check if the response is Atom XML
        if response.headers.get("Content-Type", "").startswith("application/atom+xml"):
            root = ET.fromstring(response.text)

            namespaces = {
                'atom': 'http://purl.org/atom/ns#',  # Atom namespace
                'tes': 'http://www.tidalsoftware.com/client/tesservlet'  # TES namespace
            }

            # Initialize a list to store the data
            data = []

            # Loop through entries in the XML response
            for entry in root.findall(".//atom:entry", namespaces=namespaces):
                job = entry.find(".//tes:jobdependency", namespaces=namespaces)
                if job is None:
                    continue  # skip entries without a jobdependency element

                # Extract job details
                prim_id = job.find(".//tes:id", namespaces=namespaces)
                jobid = job.find(".//tes:jobid", namespaces=namespaces)
                jobname = job.find(".//tes:jobname", namespaces=namespaces)
                depjobname = job.find(".//tes:depjobname", namespaces=namespaces)
                depjobparent = job.find(".//tes:depjobparent", namespaces=namespaces)
                depjobid = job.find(".//tes:depjobid", namespaces=namespaces)
                status = job.find(".//tes:status", namespaces=namespaces)

                # Append the extracted data as a dictionary;
                # fall back to None values if any element is missing
                try:
                    data.append({
                        "Primary ID": prim_id.text,
                        "Job ID": jobid.text,
                        "Job Name": jobname.text,
                        "Dependent Job Name": depjobname.text,
                        "Dependent Job Parent": depjobparent.text,
                        "Dependent Job Id": depjobid.text,
                        "Status": status.text
                    })
                except AttributeError:
                    data.append({
                        "Primary ID": None,
                        "Job ID": None,
                        "Job Name": None,
                        "Dependent Job Name": None,
                        "Dependent Job Parent": None,
                        "Dependent Job Id": None,
                        "Status": None
                    })

            if data:
                print(data[-1])

            # Convert the list of dictionaries to a DataFrame
            df = pd.DataFrame(data)
            print(df)

            # Save the DataFrame to a CSV file
            df.to_csv(r"C:\Users\xxxx\Downloads\job_dependencies.csv", index=False)
            print("Data has been written to job_dependencies.csv")

        else:
            print("Unexpected response format. Expected Atom XML.")

    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")

Issue:

  • Dependencies are visible in the Tidal GUI
  • Unable to fetch the same dependencies through the API
  • Both functions work independently, but the dependency information is missing from the API response

Could you please help me understand:

  1. Why isn't the API returning the dependency information?
  2. Is there an alternative approach to retrieve job dependencies?

I've attached the complete code for reference.

Thank you for your assistance.
