#!/usr/bin/env python3
"""
STRI GIS Portal Catalog Scraper

Discovers and catalogs datasets from the Smithsonian Tropical Research Institute
GIS Portal using the ArcGIS Online API.
"""

import requests
import json
from pathlib import Path
import logging
from datetime import datetime, timezone
from typing import Dict, List, Optional
import re

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

DATA_DIR = Path(__file__).parent.parent / "data" / "stri"
METADATA_DIR = DATA_DIR / "metadata"

# STRI GIS Portal ArcGIS Online Organization ID
STRI_ORG_ID = "nzS0F0zdNLvs7nc8"
ARCGIS_BASE_URL = "https://www.arcgis.com/sharing/rest"
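# Illustrative sketch of the search request the constants above produce
# (not issued verbatim; see search_stri_portal below):
#   GET {ARCGIS_BASE_URL}/search?q=orgid:{STRI_ORG_ID} AND (panama OR panamá)&f=json&num=100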

# Priority keywords for dataset selection
HIGH_PRIORITY_KEYWORDS = [
    "panama", "national", "country", "forest", "cover", "protected", "areas",
    "land use", "biodiversity", "climate", "water", "infrastructure",
    "administrative", "boundaries", "poverty", "population"
]

# Keywords to deprioritize (site-specific, not national)
LOW_PRIORITY_KEYWORDS = [
    "bci", "barro colorado", "island", "pena blanca", "site-specific",
    "trail", "sensor", "camera", "plot"
]

# Temporal dataset patterns (to identify multi-year series)
TEMPORAL_PATTERNS = [
    r"\b(19\d{2}|20\d{2})\b",  # Years like 1992, 2021
    r"edition\s+(19\d{2}|20\d{2})",
    r"version\s+(19\d{2}|20\d{2})"
]
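# Illustrative titles these patterns are meant to catch (hypothetical examples):
#   "Forest Cover 1992"       -> year 1992
#   "Land Use edition 2000"   -> year 2000
#   "Boundaries version 2012" -> year 2012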


def search_stri_portal(query: str = "panama OR panamá", num: int = 100, start: int = 1) -> Dict:
    """
    Search the STRI GIS Portal using the ArcGIS REST API
    
    Args:
        query: Search query string (default matches both "panama" and "panamá")
        num: Number of results per page (max 100)
        start: Starting position
        
    Returns:
        JSON response with search results
    """
    search_url = f"{ARCGIS_BASE_URL}/search"
    
    # Search for Panama-related datasets within the STRI organization
    params = {
        "q": f"orgid:{STRI_ORG_ID} AND ({query})",
        "f": "json",
        "num": num,
        "start": start,
        "sortField": "modified",
        "sortOrder": "desc"
    }
    
    try:
        response = requests.get(search_url, params=params, timeout=30)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        logger.error(f"Failed to search portal: {e}")
        return {}

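# Minimal usage sketch for search_stri_portal (assumes network access to
# arcgis.com; kept as comments so importing this module has no side effects):
#   page = search_stri_portal(num=10)
#   for result in page.get("results", []):
#       print(result["title"], "-", result["type"])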

def get_item_details(item_id: str) -> Optional[Dict]:
    """Get detailed metadata for a specific item"""
    details_url = f"{ARCGIS_BASE_URL}/content/items/{item_id}"
    
    params = {"f": "json"}
    
    try:
        response = requests.get(details_url, params=params, timeout=30)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        logger.error(f"Failed to get item {item_id}: {e}")
        return None

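# Usage sketch for get_item_details (the item ID below is hypothetical):
#   details = get_item_details("abc123def456")
#   if details:
#       print(details.get("title"), details.get("type"))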

def extract_year_from_title(title: str) -> Optional[int]:
    """Extract year from dataset title"""
    for pattern in TEMPORAL_PATTERNS:
        match = re.search(pattern, title, re.IGNORECASE)
        if match:
            year_str = match.group(1) if match.lastindex else match.group(0)
            try:
                return int(year_str)
            except ValueError:
                continue
    return None

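# Expected behavior of extract_year_from_title (hypothetical titles):
#   extract_year_from_title("Forest Cover 2021")     -> 2021
#   extract_year_from_title("Land Use edition 1992") -> 1992
#   extract_year_from_title("Protected Areas")       -> None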

def calculate_priority_score(item: Dict) -> float:
    """
    Calculate priority score for a dataset based on:
    - National vs site-specific coverage
    - Relevance keywords
    - Data type (prefer Feature Services)
    - Recency
    """
    score = 50.0  # Baseline
    
    title = (item.get("title") or "").lower()
    description = (item.get("description") or "").lower()
    tags = " ".join(item.get("tags") or []).lower()
    item_type = item.get("type", "")
    
    combined_text = f"{title} {description} {tags}"
    
    # Boost for high-priority keywords
    for keyword in HIGH_PRIORITY_KEYWORDS:
        if keyword in combined_text:
            score += 5
    
    # Penalty for low-priority (site-specific) keywords
    for keyword in LOW_PRIORITY_KEYWORDS:
        if keyword in combined_text:
            score -= 15
    
    # Prefer Feature Services (queryable GIS data)
    if "Feature Service" in item_type:
        score += 20
    elif "Map Service" in item_type:
        score += 10
    
    # Boost for temporal datasets
    if extract_year_from_title(title):
        score += 10
    
    # Boost for recent updates
    modified = item.get("modified", 0)
    if modified:
        # "modified" is epoch milliseconds; measure fractional years since 2020-01-01 UTC
        years_since_2020 = (modified - 1577836800000) / (365.25 * 24 * 60 * 60 * 1000)
        score += max(0.0, min(years_since_2020 * 2, 10))  # Up to +10 for very recent; no penalty for older
    
    return score

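# Worked example for calculate_priority_score (hypothetical item):
#   title="Panama Forest Cover 2021", type="Feature Service", tags=["national"]
#   -> baseline 50 + keyword boosts (panama, forest, cover, national: +20)
#      + Feature Service (+20) + year in title (+10) + recency (0 to +10)
#   => roughly 100-110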

def build_rest_endpoint(item: Dict) -> Optional[str]:
    """Construct the REST endpoint URL for a Feature Service"""
    item_type = item.get("type", "")
    
    if "Feature Service" not in item_type:
        return None
    
    # Standard ArcGIS REST endpoint pattern
    url = item.get("url")
    if url and "/FeatureServer" in url:
        # URL already names a specific layer (e.g. .../FeatureServer/2)
        if not url.endswith(("FeatureServer", "FeatureServer/")):
            return url
        # Otherwise assume layer 0
        return f"{url.rstrip('/')}/0"
    
    # Fallback: construct a best-guess endpoint from the item ID. Hosted
    # service names do not always match item IDs, so this URL may not resolve.
    item_id = item.get("id")
    if item_id:
        return f"https://services.arcgis.com/{STRI_ORG_ID}/arcgis/rest/services/{item_id}/FeatureServer/0"
    
    return None

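# Endpoint shapes build_rest_endpoint produces (illustrative URLs):
#   ".../FeatureServer"   -> ".../FeatureServer/0"  (layer 0 assumed)
#   ".../FeatureServer/3" -> returned unchanged     (layer already specified)
#   no url                -> best-guess services.arcgis.com URL that may not resolve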

def catalog_datasets(max_datasets: int = 100) -> List[Dict]:
    """
    Scrape the STRI portal and build a prioritized catalog
    
    Args:
        max_datasets: Maximum number of datasets to retrieve
        
    Returns:
        List of dataset metadata dictionaries
    """
    datasets = []
    start = 1
    batch_size = 100
    
    logger.info("Scraping STRI GIS Portal...")
    
    while len(datasets) < max_datasets:
        logger.info(f"Fetching items {start} to {start + batch_size - 1}...")
        
        results = search_stri_portal(num=batch_size, start=start)
        
        if not results or "results" not in results:
            break
        
        items = results["results"]
        
        if not items:
            break
        
        for item in items:
            # Focus on Feature Services (queryable geospatial data)
            if "Feature Service" not in item.get("type", ""):
                continue
            
            # Calculate priority
            priority = calculate_priority_score(item)
            
            # Extract year if temporal
            year = extract_year_from_title(item.get("title", ""))
            
            # Build REST endpoint
            rest_endpoint = build_rest_endpoint(item)
            
            dataset = {
                "id": item.get("id"),
                "title": item.get("title"),
                "description": item.get("description", ""),
                "type": item.get("type"),
                "tags": item.get("tags", []),
                "modified": item.get("modified"),
                "modified_date": datetime.fromtimestamp(
                    item.get("modified", 0) / 1000, tz=timezone.utc
                ).isoformat() if item.get("modified") else None,
                "url": item.get("url"),
                "rest_endpoint": rest_endpoint,
                "year": year,
                "priority_score": round(priority, 2)
            }
            
            datasets.append(dataset)
        
        # ArcGIS search responses paginate via nextStart; -1 means no more pages
        next_start = results.get("nextStart", -1)
        if next_start == -1:
            break
        
        start = next_start
    
    # Sort by priority score
    datasets.sort(key=lambda x: x["priority_score"], reverse=True)
    
    logger.info(f"Found {len(datasets)} Feature Service datasets")
    
    return datasets[:max_datasets]

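# Typical end-to-end flow (wired together in main() below):
#   datasets = catalog_datasets(max_datasets=100)
#   groups = identify_temporal_groups(datasets)
#   save_catalog(datasets, groups)  # writes JSON under data/stri/metadata/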

def identify_temporal_groups(datasets: List[Dict]) -> Dict[str, List[Dict]]:
    """
    Group datasets by base name to identify temporal series
    
    Returns:
        Dictionary mapping base name to list of datasets with years
    """
    temporal_groups = {}
    
    for dataset in datasets:
        if dataset["year"] is None:
            continue
        
        # Remove year from title to get base name
        title = dataset["title"]
        base_name = re.sub(r'\b(19\d{2}|20\d{2})\b', '', title)
        base_name = re.sub(r'\s+', ' ', base_name).strip()
        base_name = re.sub(r'edition|version', '', base_name, flags=re.IGNORECASE).strip()
        
        temporal_groups.setdefault(base_name, []).append(dataset)
    
    # Filter to groups with multiple years
    temporal_groups = {
        k: sorted(v, key=lambda x: x["year"])
        for k, v in temporal_groups.items()
        if len(v) > 1
    }
    
    return temporal_groups

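# Example grouping (hypothetical titles): "Forest Cover 1992" and
# "Forest Cover 2021" both reduce to the base name "Forest Cover",
# yielding {"Forest Cover": [<1992 dataset>, <2021 dataset>]}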

def save_catalog(datasets: List[Dict], temporal_groups: Dict[str, List[Dict]]):
    """Save catalog and temporal groups to JSON files"""
    METADATA_DIR.mkdir(parents=True, exist_ok=True)
    
    # Save main catalog
    catalog_path = METADATA_DIR / "stri_catalog.json"
    with open(catalog_path, 'w') as f:
        json.dump({
            "generated_at": datetime.now().isoformat(),
            "total_datasets": len(datasets),
            "datasets": datasets
        }, f, indent=2)
    
    logger.info(f"Saved catalog to {catalog_path}")
    
    # Save temporal groups
    if temporal_groups:
        temporal_path = METADATA_DIR / "stri_temporal_groups.json"
        with open(temporal_path, 'w') as f:
            json.dump({
                "generated_at": datetime.now().isoformat(),
                "num_groups": len(temporal_groups),
                "groups": temporal_groups
            }, f, indent=2)
        
        logger.info(f"Saved {len(temporal_groups)} temporal groups to {temporal_path}")


def main():
    """Main execution"""
    logger.info("=== STRI GIS Portal Catalog Scraper ===")
    
    # Catalog datasets
    datasets = catalog_datasets(max_datasets=100)
    
    # Identify temporal groups
    temporal_groups = identify_temporal_groups(datasets)
    
    # Save results
    save_catalog(datasets, temporal_groups)
    
    # Print summary
    logger.info("\n" + "="*60)
    logger.info(f"✅ Cataloged {len(datasets)} datasets")
    logger.info(f"📊 Found {len(temporal_groups)} temporal dataset groups")
    
    if temporal_groups:
        logger.info("\nTemporal Groups:")
        for base_name, group in list(temporal_groups.items())[:5]:
            years = [d["year"] for d in group]
            logger.info(f"  - {base_name}: {years}")
    
    logger.info("\nTop 10 Priority Datasets:")
    for i, dataset in enumerate(datasets[:10], 1):
        logger.info(f"  {i}. [{dataset['priority_score']:.1f}] {dataset['title']}")
    
    logger.info("="*60)


if __name__ == "__main__":
    main()