Merged
72 changes: 55 additions & 17 deletions examples/advanced/file_upload.py
@@ -66,7 +66,7 @@ def log(call: str):

# Simple SHA-256 helper with caching to avoid re-reading large files multiple times.
_FILE_HASH_CACHE = {}

ATTRIBUTE_VISIBILITY_DELAYS = (0, 3, 10, 20, 35, 50, 70, 90, 120)

def file_sha256(path: Path): # returns (hex_digest, size_bytes)
try:
@@ -153,22 +153,32 @@ def generate_test_pdf(size_mb: int = 10) -> Path:
return test_file


def backoff(op, *, delays=(0, 2, 5, 10), retry_status=(400, 403, 404, 409, 412, 429, 500, 502, 503, 504)):
def backoff(op, *, delays=(0, 2, 5, 10, 20, 20)):
last = None
total_delay = 0
attempts = 0
for d in delays:
if d:
time.sleep(d)
total_delay += d
attempts += 1
try:
return op()
result = op()
if attempts > 1:
retry_count = attempts - 1
print(
f" ↺ Backoff succeeded after {retry_count} retry(s); waited {total_delay}s total."
)
return result
except Exception as ex: # noqa: BLE001
last = ex
r = getattr(ex, "response", None)
code = getattr(r, "status_code", None)
if isinstance(ex, requests.exceptions.HTTPError) and code in retry_status:
continue
# For non-HTTP errors just retry the schedule
continue
if last:
if attempts:
retry_count = max(attempts - 1, 0)
print(
f" ⚠ Backoff exhausted after {retry_count} retry(s); waited {total_delay}s total."
)
raise last


@@ -178,12 +188,12 @@ def backoff(op, *, delays=(0, 2, 5, 10), retry_status=(400, 403, 404, 409, 412,

def ensure_table():
# Check by schema
existing = client.get_table_info(TABLE_SCHEMA_NAME)
existing = backoff(lambda: client.get_table_info(TABLE_SCHEMA_NAME))
if existing:
print({"table": TABLE_SCHEMA_NAME, "existed": True})
return existing
log("client.create_table('new_FileSample', schema={'new_Title': 'string'})")
info = client.create_table(TABLE_SCHEMA_NAME, {"new_Title": "string"})
info = backoff(lambda: client.create_table(TABLE_SCHEMA_NAME, {"new_Title": "string"}))
print({"table": TABLE_SCHEMA_NAME, "existed": False, "metadata_id": info.get("metadata_id")})
return info

@@ -217,7 +227,7 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
f"{odata.api}/EntityDefinitions({meta_id})/Attributes?$select=SchemaName&$filter="
f"SchemaName eq '{schema_name}'"
)
r = odata._request("get", url)
r = backoff(lambda: odata._request("get", url), delays=ATTRIBUTE_VISIBILITY_DELAYS)
val = []
try:
val = r.json().get("value", [])
@@ -245,7 +255,7 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
}
try:
url = f"{odata.api}/EntityDefinitions({meta_id})/Attributes"
r = odata._request("post", url, json=payload)
backoff(lambda: odata._request("post", url, json=payload), delays=ATTRIBUTE_VISIBILITY_DELAYS)
print({f"{key_prefix}_file_attribute_created": True})
time.sleep(2)
return True
@@ -263,11 +273,39 @@ def ensure_file_attribute_generic(schema_name: str, label: str, key_prefix: str)
return False


def wait_for_attribute_visibility(logical_name: str, label: str):
if not logical_name or not entity_set:
return False
odata = client._get_odata()
probe_url = f"{odata.api}/{entity_set}?$top=1&$select={logical_name}"
waited = 0
last_error = None
for delay in ATTRIBUTE_VISIBILITY_DELAYS:
if delay:
time.sleep(delay)
waited += delay
try:
resp = odata._request("get", probe_url)
try:
resp.json()
except Exception: # noqa: BLE001
pass
if waited:
print({f"{label}_attribute_visible_wait_seconds": waited})
return True
except Exception as ex: # noqa: BLE001
last_error = ex
continue
raise RuntimeError(f"Timed out waiting for attribute '{logical_name}' to materialize") from last_error


# Conditionally ensure each attribute only if its mode is selected
if run_small:
ensure_file_attribute_generic(small_file_attr_schema, "Small Document", "small")
wait_for_attribute_visibility(small_file_attr_logical, "small")
if run_chunk:
ensure_file_attribute_generic(chunk_file_attr_schema, "Chunk Document", "chunk")
wait_for_attribute_visibility(chunk_file_attr_logical, "chunk")

# --------------------------- Record create ---------------------------
record_id = None
@@ -325,7 +363,7 @@ def get_dataset_info(file_path: Path):
dl_url_single = (
f"{odata.api}/{entity_set}({record_id})/{small_file_attr_logical}/$value" # raw entity_set URL OK
)
resp_single = odata._request("get", dl_url_single)
resp_single = backoff(lambda: odata._request("get", dl_url_single))
content_single = resp_single.content or b""
import hashlib # noqa: WPS433

@@ -355,7 +393,7 @@ def get_dataset_info(file_path: Path):
)
)
print({"small_replace_upload_completed": True, "small_replace_source_size": replace_size_small})
resp_single_replace = odata._request("get", dl_url_single)
resp_single_replace = backoff(lambda: odata._request("get", dl_url_single))
content_single_replace = resp_single_replace.content or b""
downloaded_hash_replace = hashlib.sha256(content_single_replace).hexdigest() if content_single_replace else None
hash_match_replace = (
@@ -397,7 +435,7 @@ def get_dataset_info(file_path: Path):
dl_url_chunk = (
f"{odata.api}/{entity_set}({record_id})/{chunk_file_attr_logical}/$value" # raw entity_set for download
)
resp_chunk = odata._request("get", dl_url_chunk)
resp_chunk = backoff(lambda: odata._request("get", dl_url_chunk))
content_chunk = resp_chunk.content or b""
import hashlib # noqa: WPS433

@@ -426,7 +464,7 @@ def get_dataset_info(file_path: Path):
)
)
print({"chunk_replace_upload_completed": True})
resp_chunk_replace = odata._request("get", dl_url_chunk)
resp_chunk_replace = backoff(lambda: odata._request("get", dl_url_chunk))
content_chunk_replace = resp_chunk_replace.content or b""
dst_hash_chunk_replace = hashlib.sha256(content_chunk_replace).hexdigest() if content_chunk_replace else None
hash_match_chunk_replace = (
@@ -459,7 +497,7 @@ def get_dataset_info(file_path: Path):
if cleanup_table:
try:
log(f"client.delete_table('{TABLE_SCHEMA_NAME}')")
client.delete_table(TABLE_SCHEMA_NAME)
backoff(lambda: client.delete_table(TABLE_SCHEMA_NAME))
print({"table_deleted": True})
except Exception as e: # noqa: BLE001
print({"table_deleted": False, "error": str(e)})
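The change to file_upload.py above replaces the status-code allow-list in backoff() with a plain fixed delay schedule: every attempt that raises is retried, the helper reports how many retries it took and how long it waited, and a longer ATTRIBUTE_VISIBILITY_DELAYS schedule (0 + 3 + 10 + 20 + 35 + 50 + 70 + 90 + 120 = 398 seconds across nine attempts) is reserved for metadata probes such as wait_for_attribute_visibility(). Below is a minimal, self-contained sketch of that retry shape, trimmed of the success/exhaustion prints; flaky_op and the shortened delay schedule passed to it are hypothetical, used only to show two transient failures followed by a success.

import time


def backoff(op, *, delays=(0, 2, 5, 10, 20, 20)):
    # One attempt per entry in delays; sleep for the entry first, then call op().
    # Any exception is retried; the last exception is re-raised once the schedule is exhausted.
    last = None
    for d in delays:
        if d:
            time.sleep(d)
        try:
            return op()
        except Exception as ex:  # noqa: BLE001
            last = ex
    if last:
        raise last


# Hypothetical flaky operation: fails twice, then succeeds.
calls = {"count": 0}


def flaky_op():
    calls["count"] += 1
    if calls["count"] < 3:
        raise RuntimeError("transient failure")
    return "ok"


print(backoff(flaky_op, delays=(0, 0.1, 0.1)))  # -> ok, after two retries

In the diff itself the operation is always passed as a zero-argument lambda, e.g. backoff(lambda: client.get_table_info(TABLE_SCHEMA_NAME)).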
81 changes: 61 additions & 20 deletions examples/advanced/walkthrough.py
@@ -19,9 +19,12 @@

import sys
import json
import time
from enum import IntEnum
from azure.identity import InteractiveBrowserCredential
from PowerPlatform.Dataverse.client import DataverseClient
from PowerPlatform.Dataverse.core.errors import MetadataError
import requests


# Simple logging helper
@@ -36,6 +39,35 @@ class Priority(IntEnum):
HIGH = 3


def backoff(op, *, delays=(0, 2, 5, 10, 20, 20)):
last = None
total_delay = 0
attempts = 0
for d in delays:
if d:
time.sleep(d)
total_delay += d
attempts += 1
try:
result = op()
if attempts > 1:
retry_count = attempts - 1
print(
f" ↺ Backoff succeeded after {retry_count} retry(s); waited {total_delay}s total."
)
return result
except Exception as ex: # noqa: BLE001
last = ex
continue
if last:
if attempts:
retry_count = max(attempts - 1, 0)
print(
f" ⚠ Backoff exhausted after {retry_count} retry(s); waited {total_delay}s total."
)
raise last


def main():
print("=" * 80)
print("Dataverse SDK Walkthrough")
@@ -72,7 +104,7 @@ def main():
table_name = "new_WalkthroughDemo"

log_call(f"client.get_table_info('{table_name}')")
table_info = client.get_table_info(table_name)
table_info = backoff(lambda: client.get_table_info(table_name))

if table_info:
print(f"✓ Table already exists: {table_info.get('table_schema_name')}")
@@ -87,7 +119,7 @@ def main():
"new_Completed": "bool",
"new_Priority": Priority,
}
table_info = client.create_table(table_name, columns)
table_info = backoff(lambda: client.create_table(table_name, columns))
print(f"✓ Created table: {table_info.get('table_schema_name')}")
print(f" Columns created: {', '.join(table_info.get('columns_created', []))}")

@@ -107,7 +139,7 @@ def main():
"new_Completed": False,
"new_Priority": Priority.MEDIUM,
}
id1 = client.create(table_name, single_record)[0]
id1 = backoff(lambda: client.create(table_name, single_record))[0]
print(f"✓ Created single record: {id1}")

# Multiple create
@@ -135,7 +167,7 @@ def main():
"new_Priority": Priority.HIGH,
},
]
ids = client.create(table_name, multiple_records)
ids = backoff(lambda: client.create(table_name, multiple_records))
print(f"✓ Created {len(ids)} records: {ids}")

# ============================================================================
@@ -147,7 +179,7 @@ def main():

# Single read by ID
log_call(f"client.get('{table_name}', '{id1}')")
record = client.get(table_name, id1)
record = backoff(lambda: client.get(table_name, id1))
print("✓ Retrieved single record:")
print(
json.dumps(
@@ -167,7 +199,8 @@ def main():
# Multiple read with filter
log_call(f"client.get('{table_name}', filter='new_quantity gt 5')")
all_records = []
for page in client.get(table_name, filter="new_quantity gt 5"):
records_iterator = backoff(lambda: client.get(table_name, filter="new_quantity gt 5"))
for page in records_iterator:
all_records.extend(page)
print(f"✓ Found {len(all_records)} records with new_quantity > 5")
for rec in all_records:
@@ -182,13 +215,13 @@ def main():

# Single update
log_call(f"client.update('{table_name}', '{id1}', {{...}})")
client.update(table_name, id1, {"new_Quantity": 100})
updated = client.get(table_name, id1)
backoff(lambda: client.update(table_name, id1, {"new_Quantity": 100}))
updated = backoff(lambda: client.get(table_name, id1))
print(f"✓ Updated single record new_Quantity: {updated.get('new_quantity')}")

# Multiple update (broadcast same change)
log_call(f"client.update('{table_name}', [{len(ids)} IDs], {{...}})")
client.update(table_name, ids, {"new_Completed": True})
backoff(lambda: client.update(table_name, ids, {"new_Completed": True}))
print(f"✓ Updated {len(ids)} records to new_Completed=True")

# ============================================================================
@@ -210,13 +243,14 @@ def main():
}
for i in range(1, 21)
]
paging_ids = client.create(table_name, paging_records)
paging_ids = backoff(lambda: client.create(table_name, paging_records))
print(f"✓ Created {len(paging_ids)} records for paging demo")

# Query with paging
log_call(f"client.get('{table_name}', page_size=5)")
print("Fetching records with page_size=5...")
for page_num, page in enumerate(client.get(table_name, orderby=["new_Quantity"], page_size=5), start=1):
paging_iterator = backoff(lambda: client.get(table_name, orderby=["new_Quantity"], page_size=5))
for page_num, page in enumerate(paging_iterator, start=1):
record_ids = [r.get("new_walkthroughdemoid")[:8] + "..." for r in page]
print(f" Page {page_num}: {len(page)} records - IDs: {record_ids}")

@@ -230,7 +264,7 @@ def main():
log_call(f"client.query_sql('SELECT new_title, new_quantity FROM {table_name} WHERE new_completed = 1')")
sql = f"SELECT new_title, new_quantity FROM new_walkthroughdemo WHERE new_completed = 1"
try:
results = client.query_sql(sql)
results = backoff(lambda: client.query_sql(sql))
print(f"✓ SQL query returned {len(results)} completed records:")
for result in results[:5]: # Show first 5
print(f" - new_Title='{result.get('new_title')}', new_Quantity={result.get('new_quantity')}")
@@ -252,8 +286,8 @@ def main():
"new_Completed": False,
"new_Priority": "High", # String label instead of int
}
label_id = client.create(table_name, label_record)[0]
retrieved = client.get(table_name, label_id)
label_id = backoff(lambda: client.create(table_name, label_record))[0]
retrieved = backoff(lambda: client.get(table_name, label_id))
print(f"✓ Created record with string label 'High' for new_Priority")
print(f" new_Priority stored as integer: {retrieved.get('new_priority')}")
print(f" new_Priority@FormattedValue: {retrieved.get('new_priority@OData.Community.Display.V1.FormattedValue')}")
@@ -266,12 +300,12 @@ def main():
print("=" * 80)

log_call(f"client.create_columns('{table_name}', {{'new_Notes': 'string'}})")
created_cols = client.create_columns(table_name, {"new_Notes": "string"})
created_cols = backoff(lambda: client.create_columns(table_name, {"new_Notes": "string"}))
print(f"✓ Added column: {created_cols[0]}")

# Delete the column we just added
log_call(f"client.delete_columns('{table_name}', ['new_Notes'])")
client.delete_columns(table_name, ["new_Notes"])
backoff(lambda: client.delete_columns(table_name, ["new_Notes"]))
print(f"✓ Deleted column: new_Notes")

# ============================================================================
@@ -283,12 +317,12 @@ def main():

# Single delete
log_call(f"client.delete('{table_name}', '{id1}')")
client.delete(table_name, id1)
backoff(lambda: client.delete(table_name, id1))
print(f"✓ Deleted single record: {id1}")

# Multiple delete (delete the paging demo records)
log_call(f"client.delete('{table_name}', [{len(paging_ids)} IDs])")
job_id = client.delete(table_name, paging_ids)
job_id = backoff(lambda: client.delete(table_name, paging_ids))
print(f"✓ Bulk delete job started: {job_id}")
print(f" (Deleting {len(paging_ids)} paging demo records)")

@@ -300,8 +334,15 @@ def main():
print("=" * 80)

log_call(f"client.delete_table('{table_name}')")
client.delete_table(table_name)
print(f"✓ Deleted table: {table_name}")
try:
backoff(lambda: client.delete_table(table_name))
print(f"✓ Deleted table: {table_name}")
except Exception as ex: # noqa: BLE001
code = getattr(getattr(ex, "response", None), "status_code", None)
if (isinstance(ex, (requests.exceptions.HTTPError, MetadataError)) and code == 404):
print(f"✓ Table removed: {table_name}")
else:
raise

# ============================================================================
# SUMMARY
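walkthrough.py now wraps every SDK call in the same backoff() helper, and its cleanup step goes one step further: a 404 raised by client.delete_table() is reported as "table removed" instead of failing the run, which covers the case where the table is already gone (for example when a retried delete's first attempt actually succeeded). The sketch below isolates that pattern; delete_table_if_present and its retry parameter are illustrative names rather than SDK API, while client.delete_table, requests.exceptions.HTTPError, and MetadataError are the ones used in the diff.

import requests

from PowerPlatform.Dataverse.core.errors import MetadataError


def delete_table_if_present(client, table_name, retry=lambda op: op()):
    # Pass retry=backoff to reuse the walkthrough's retry schedule; the default calls once.
    try:
        retry(lambda: client.delete_table(table_name))
        print(f"✓ Deleted table: {table_name}")
    except Exception as ex:  # noqa: BLE001
        code = getattr(getattr(ex, "response", None), "status_code", None)
        if isinstance(ex, (requests.exceptions.HTTPError, MetadataError)) and code == 404:
            # Already deleted (or removed by an earlier attempt): treat as success.
            print(f"✓ Table removed: {table_name}")
        else:
            raise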