print("Starting", __name__, "...")
import requests, zipfile, os, time, v
import pandas as pd
from datetime import datetime
from google.transit import gtfs_realtime_pb2
from v import *
from data import URLS, ROUTE_TYPE_NAMES
import builtins
import sqlite3

HOURS = 2
AGE = 2

def _normalize_rt_delays(rt_raw):
    """
    Normalize raw realtime dict from load_realtime into a clean mapping:
    (trip_id, stop_id) -> int delay (seconds). Coerces True->0, extracts
    numeric from lists/tuples/dicts/Series, and uses max() when multiple
    updates exist for the same key.
    """
    rt_delays = {}
    for k, vval in rt_raw.items():
        try:
            if vval is True:
                delay_int = 0
            elif isinstance(vval, (int, float, str)):
                try:
                    delay_int = int(vval)
                except Exception:
                    delay_int = 0
            elif isinstance(vval, (list, tuple, pd.Series, dict)):
                if isinstance(vval, dict):
                    vals = list(vval.values())
                else:
                    vals = list(vval)
                delay_int = 0
                for candidate in vals:
                    try:
                        delay_int = int(candidate)
                        break
                    except Exception:
                        continue
            else:
                try:
                    delay_int = int(vval)
                except Exception:
                    delay_int = 0

            if k in rt_delays:
                rt_delays[k] = max(rt_delays[k], delay_int)
            else:
                rt_delays[k] = delay_int
        except Exception:
            rt_delays[k] = 0

    return rt_delays


def load_static_gtfs(country, operator, cache_age=AGE*24*3600):
    """
    Load static GTFS tables for (country, operator) into v.cache.

    Resolution order:
      1. SQLite cache under sqlcache/ (if younger than cache_age seconds),
      2. cached GTFS ZIP under cache/ (if younger than cache_age seconds),
      3. fresh download from URLS.

    Side effects: populates v.cache[country][operator] with stops / trips /
    stop_times / routes / calendar_dates DataFrames plus timestamp and
    cache_age, and rewrites the SQLite cache after a ZIP (re)load.
    Returns None.

    Raises: the download exception when fetching fails AND no usable
    cached ZIP exists.
    """
    # Make sure both cache directories exist before touching them.
    os.makedirs("sqlcache", exist_ok=True)
    os.makedirs("cache", exist_ok=True)

    db_path = f"sqlcache/gtfs_{country}_{operator}.sqlite"
    if os.path.exists(db_path):
        try:
            # Check if SQLite cache is still fresh
            db_age = time.time() - os.path.getmtime(db_path)
            if db_age < cache_age:
                print(f"Trying to load GTFS for: {country} {operator} from SQLite cache ({db_age/3600:.1f}h old)")
                conn = sqlite3.connect(db_path)
                try:
                    stops = pd.read_sql("SELECT * FROM stops", conn)
                    trips = pd.read_sql("SELECT * FROM trips", conn)
                    stop_times = pd.read_sql("SELECT * FROM stop_times", conn)
                    routes = pd.read_sql("SELECT * FROM routes", conn)
                    calendar_dates = pd.read_sql("SELECT * FROM calendar_dates", conn)
                finally:
                    conn.close()
                # Dates round-trip through SQLite as text; restore date objects.
                calendar_dates["date"] = pd.to_datetime(calendar_dates["date"]).dt.date

                print("Loaded GTFS from SQLite cache")
                v.cache[country][operator] = {
                    "stops": stops,
                    "trips": trips,
                    "stop_times": stop_times,
                    "routes": routes,
                    "calendar_dates": calendar_dates,
                    "timestamp": time.time(),
                    "cache_age": cache_age,
                }
                print("SQLite cache success for", country, operator)
                return
            else:
                print(f"SQLite cache expired ({db_age/3600:.1f}h old, max {cache_age/3600:.1f}h), forcing re-download")
                os.remove(db_path)
        except Exception as e:
            print("SQLite cache failed, falling back to ZIP:", e)

    try:
        urls = URLS[country][operator]
    except KeyError:
        # Country-level template URLs keyed with a "_SWE_" placeholder.
        # Work on a copy: the previous code mutated the shared URLS dict in
        # place, so a second operator would no longer find "_SWE_" to replace.
        urls = dict(URLS[country])
        for key in ("gtfs_url", "trip_updates", "vehicle_positions"):
            urls[key] = urls[key].replace("_SWE_", operator)
    print(urls)
    cache_file = f"cache/gtfs_static_cache_{country}_{operator}.zip"

    need_download = True
    if os.path.exists(cache_file):
        age = time.time() - os.path.getmtime(cache_file)
        if age < cache_age:
            print(f"Using cached GTFS for {country}/{operator} ({age/3600:.1f}h old)")
            need_download = False

    if need_download:
        print(f"Downloading static GTFS for {country}/{operator}...")
        temp_file = cache_file + ".tmp"
        try:
            resp = requests.get(urls["gtfs_url"], headers=urls.get("header"), verify=False)
            resp.raise_for_status()
            with open(temp_file, "wb") as f:
                f.write(resp.content)
            # Validate it's a valid zip file before replacing the cache
            with zipfile.ZipFile(temp_file, "r") as test_zip:
                test_zip.testzip()
            # Only replace if validation passed
            if os.path.exists(cache_file):
                os.remove(cache_file)
            os.rename(temp_file, cache_file)
            print("Static GTFS updated")
        except Exception as e:
            print(f"Download failed: {e}")
            if os.path.exists(temp_file):
                os.remove(temp_file)
            if not os.path.exists(cache_file):
                # No previous ZIP to fall back on -> propagate the failure.
                raise
            print("Using existing cache file despite download failure")

    with zipfile.ZipFile(cache_file, "r") as z:
        # Load into memory once; dtype=str keeps GTFS ids intact
        # (leading zeros, mixed alphanumerics).
        stops = pd.read_csv(z.open("stops.txt"), dtype=str)
        trips = pd.read_csv(z.open("trips.txt"), dtype=str)
        stop_times = pd.read_csv(z.open("stop_times.txt"), dtype=str)
        routes = pd.read_csv(z.open("routes.txt"), dtype=str)

        # Rename first, then convert once (previous code converted twice).
        routes = routes.rename(columns={"route_type": "route_type_static"})
        routes["route_type_static"] = routes["route_type_static"].astype(int)
        print(routes["route_type_static"].value_counts())

        def safe_usecols(df, cols):
            # Keep only the columns that actually exist in this feed.
            existing = [c for c in cols if c in df.columns]
            return df[existing]

        stops = safe_usecols(stops, ["stop_id", "stop_name", "parent_station"])
        trips = safe_usecols(trips, ["trip_id", "route_id", "direction_id", "trip_headsign", "service_id"])
        routes = safe_usecols(routes, ["route_id", "route_short_name", "route_long_name", "route_type_static"])
        stop_times = safe_usecols(stop_times, [
            "trip_id",
            "stop_id",
            "departure_time",
            "stop_headsign",
            "pickup_type",
            "drop_off_type"])

        # Load calendar_dates to filter by service date
        try:
            calendar_dates = pd.read_csv(z.open("calendar_dates.txt"), dtype=str)
            calendar_dates["date"] = pd.to_datetime(calendar_dates["date"], format="%Y%m%d").dt.date
            # Keep only exception_type "1" (service is operating)
            calendar_dates = calendar_dates[calendar_dates["exception_type"] == "1"]
            print("Loaded calendar_dates.txt")
        except KeyError:
            # zipfile raises KeyError when the member file is missing.
            print("calendar_dates.txt missing, trying calendar.txt...")
            try:
                cal = pd.read_csv(z.open("calendar.txt"), dtype=str)
                # Expand calendar.txt into a calendar_dates-like table.
                weekday_cols = ["monday", "tuesday", "wednesday", "thursday",
                                "friday", "saturday", "sunday"]
                rows = []
                for _, r in cal.iterrows():
                    # pd.to_datetime instead of datetime.datetime.strptime:
                    # this module imports the datetime *class* (not the
                    # module), so datetime.datetime raised AttributeError.
                    start = pd.to_datetime(r["start_date"], format="%Y%m%d")
                    end = pd.to_datetime(r["end_date"], format="%Y%m%d")
                    for d in pd.date_range(start, end):
                        # d.weekday(): Monday=0 -> index into weekday_cols
                        if r.get(weekday_cols[d.weekday()]) == "1":
                            rows.append({"service_id": r["service_id"], "date": d.date()})
                calendar_dates = pd.DataFrame(rows)
                print("calendar.txt loaded as fallback")
            except Exception:
                print("No calendar_dates.txt or calendar.txt found - disabling service filtering")
                calendar_dates = pd.DataFrame(columns=["service_id", "date"])

    def time_to_seconds(t):
        """Parse a GTFS time (possibly >= 24h) into seconds; 0 on garbage."""
        if pd.isna(t):
            return 0
        if isinstance(t, (int, float)):
            # Interpret 12.0 as 12:00:00
            return int(t) * 3600
        t = str(t)
        if ":" not in t:
            # Weird formats -> treat as hour only
            try:
                return int(float(t)) * 3600
            except ValueError:
                return 0
        try:
            h, m, s = map(int, t.split(":"))
            return h * 3600 + m * 60 + s
        except ValueError:
            return 0

    def normalize_seconds(sec):
        # Wrap GTFS "after midnight" times (24:xx+) back into a single day.
        return sec - 24 * 3600 if sec >= 24 * 3600 else sec

    stop_times["departure_secs"] = stop_times["departure_time"].apply(time_to_seconds)
    stop_times["normalized_secs"] = stop_times["departure_secs"].apply(normalize_seconds)

    v.cache[country][operator] = {
        "stops": stops,
        "trips": trips,
        "stop_times": stop_times,
        "routes": routes,
        "calendar_dates": calendar_dates,
        "timestamp": time.time(),
        "cache_age": cache_age,
        }

    print("Saving GTFS to SQLite cache")
    conn = sqlite3.connect(db_path)
    try:
        stops.to_sql("stops", conn, if_exists="replace", index=False)
        trips.to_sql("trips", conn, if_exists="replace", index=False)
        stop_times.to_sql("stop_times", conn, if_exists="replace", index=False)
        routes.to_sql("routes", conn, if_exists="replace", index=False)
        calendar_dates.to_sql("calendar_dates", conn, if_exists="replace", index=False)
    finally:
        conn.close()


def load_realtime(urls, country, operator):
    """Download and parse a GTFS-RT TripUpdates feed.

    Returns a dict keyed by (trip_id, stop_id):
      * int delay in seconds when the feed supplies departure.delay,
      * True when only an absolute departure.time is given (e.g. SNCB);
        callers normalize this via _normalize_rt_delays.
    Cancelled trips are skipped entirely.
    """
    print("Downloading GTFS-RT...")
    feed = gtfs_realtime_pb2.FeedMessage()
    # Narrowed from a bare except: only "no header configured" is expected.
    try:
        header = URLS[country][operator]["header"]
    except (KeyError, TypeError):
        header = None
    resp = requests.get(urls["trip_updates"], verify=False, headers=header)
    feed.ParseFromString(resp.content)

    rt_delays = {}
    for entity in feed.entity:
        if not entity.HasField("trip_update"):
            continue
        tu = entity.trip_update
        if tu.trip.schedule_relationship == gtfs_realtime_pb2.TripDescriptor.CANCELED:
            continue
        for stu in tu.stop_time_update:
            if stu.HasField("departure"):
                # Some feeds (e.g. SNCB) provide departure.time but not delay
                if stu.departure.HasField("delay"):
                    rt_delays[(tu.trip.trip_id, stu.stop_id)] = stu.departure.delay
                elif stu.departure.HasField("time"):
                    rt_delays[(tu.trip.trip_id, stu.stop_id)] = True
    return rt_delays

def is_area_id(stop_id: str) -> bool: return stop_id.isdigit() and len(stop_id) == 16 and stop_id.endswith("000")

def _first_row_per_name(matches):
    """Collapse matches to the first row per stop_name (stable dedup)."""
    best_rows = [group.iloc[0] for _, group in matches.groupby("stop_name")]
    return pd.DataFrame(best_rows)


def search(query, country, operator):
    """
    Case-insensitive substring search over stops for (country, operator).

    Loads/refreshes static GTFS if the cache is missing or stale, then
    matches the query against stop_id and stop_name. Country-specific
    post-processing collapses duplicate platforms to one row per stop_name.

    Returns: dict {stop_name: stop_id}.
    """
    v.cache.setdefault(country, {})

    # Use the cached GTFS tables when present and still fresh.
    data = None
    cached = v.cache[country].get(operator)
    if isinstance(cached, dict) and "timestamp" in cached:
        age = time.time() - cached["timestamp"]
        max_age = cached.get("cache_age", 12 * 3600)  # Default 12 hours
        if age < max_age:
            data = cached

    if data is None:
        # load_static_gtfs stores into v.cache and returns None, so the
        # cache must be re-read afterwards (the previous code assigned the
        # None return value directly and crashed on data["stops"]).
        load_static_gtfs(country, operator)
        data = v.cache[country][operator]

    stops = data["stops"].copy()

    # fillna BEFORE astype(str): after astype, NaN is already the string "nan".
    stops["stop_id"] = stops["stop_id"].fillna("").astype(str)
    stops["stop_name"] = stops["stop_name"].fillna("").astype(str)

    q = (query or "").lower()

    # regex=False: treat the query as a literal so characters like "(" or "*"
    # do not raise a regex error.
    mask = (
        stops["stop_id"].str.lower().str.contains(q, na=False, regex=False) |
        stops["stop_name"].str.lower().str.contains(q, na=False, regex=False))

    matches = stops[mask].copy()
    if matches.empty:
        # Avoid KeyError on the column lookups in the dedup branches below.
        return {}

    if country == "it":
        # Prefer prefix match for stop_name so "Termini" does not also pull
        # in "Termini Imerese".
        mask = (
            stops["stop_id"].str.lower().str.contains(q, na=False, regex=False) |
            stops["stop_name"].str.lower().str.startswith(q)
        )
        matches = stops[mask].copy()
        if matches.empty:
            return {}
        matches = _first_row_per_name(matches)

    if country == "se":
        # Prefer the 16-digit "area" id (parent area) over platform ids.
        best_rows = []
        for _, group in matches.groupby("stop_name"):
            area_rows = group[group["stop_id"].apply(is_area_id)]
            best_rows.append(area_rows.iloc[0] if len(area_rows) else group.iloc[0])
        matches = pd.DataFrame(best_rows)

    if country == "wa":
        # Polish feeds use directional suffixes like 01, 02, 03, 04.
        # We pick the lowest-numbered platform as the representative
        # so that search("Teatr Słowackiego") returns one row per stop_name.
        def platform_number(s):
            tail = str(s)[-2:]
            return int(tail) if tail.isdigit() else 99

        best_rows = []
        for _, group in matches.groupby("stop_name"):
            group_sorted = group.sort_values("stop_id", key=lambda col: col.map(platform_number))
            best_rows.append(group_sorted.iloc[0])
        matches = pd.DataFrame(best_rows)

    if country in ("de", "cz"):
        # One representative row per stop name (was two identical blocks).
        matches = _first_row_per_name(matches)

    return dict(zip(matches["stop_name"], matches["stop_id"]))


def normalize_stop_id(stop_id, stops_df, country=None, operator=None):
    """
    Expand one stop_id into the list of platform/sibling stop_ids that
    belong to the same physical stop, using country/operator-specific
    id conventions, then falling back to GTFS parent_station links and
    an "<id>_" prefix heuristic. Returns [] when nothing matches.
    """
    stop_id = str(stop_id)
    stops_df = stops_df.copy()
    stops_df["stop_id"] = stops_df["stop_id"].astype(str)

    # --- CZECHIA / PRAGUE PLATFORM MERGING ---
    # Prague ids look like U{number}Z{platform}P, U{number}S1, ...;
    # all U{number}Z* entries are platforms of the same stop.
    if country == "cz" and operator == "pr":
        import re
        hit = re.match(r'^(U\d+)', stop_id)
        if hit:
            prefix = hit.group(1) + "Z"  # e.g. "U689" -> "U689Z"
            platforms = stops_df.loc[
                stops_df["stop_id"].str.startswith(prefix), "stop_id"
            ].tolist()
            if platforms:
                return platforms

    # --- GERMANY / VBB PLATFORM MERGING ---
    # VBB ids look like de:11000:900181503::1 — everything before "::"
    # identifies the stop, the tail is the platform.
    if country == "de" and operator == "vbb":
        pieces = stop_id.split("::")
        if len(pieces) >= 2:
            platforms = stops_df.loc[
                stops_df["stop_id"].str.startswith(pieces[0] + "::"), "stop_id"
            ].tolist()
            if platforms:
                return platforms

    # --- ITALY / ROME STOP MERGING ---
    # Merge by stop name with any "(...)" suffix stripped.
    if country == "it" or operator == "vvs":
        row = stops_df[stops_df["stop_id"] == stop_id]
        if len(row):
            wanted = row.iloc[0]["stop_name"].split("(")[0].strip().lower()
            names = stops_df["stop_name"].str.split("(").str[0].str.strip().str.lower()
            return stops_df.loc[names == wanted, "stop_id"].tolist()

    # (Swedish 16-digit base-id merging was deliberately disabled here.)

    # --- POLAND PLATFORM MERGING ---
    # Directional suffixes (01..04) on dash-stripped ids mark platforms.
    if operator == "wa":
        print("WARSAW")
        bare = stop_id.replace("-", "")
        if bare[-2:].isdigit():
            stem = bare[:-2]
            platforms = stops_df.loc[
                stops_df["stop_id"].str.replace("-", "").str.startswith(stem), "stop_id"
            ].tolist()
            if platforms:
                return platforms

    if "parent_station" not in stops_df.columns:
        stops_df["parent_station"] = None

    # --- GENERIC GTFS LOGIC ---
    # Children first; a known id with no children stands for itself.
    children = stops_df.loc[
        stops_df["parent_station"].astype(str) == stop_id, "stop_id"
    ].tolist()
    if children:
        return children
    if stop_id in stops_df["stop_id"].values:
        return [stop_id]

    # Last resort: ids derived from this one by an underscore suffix.
    derived = stops_df.loc[
        stops_df["stop_id"].str.startswith(stop_id + "_"), "stop_id"
    ].tolist()
    return derived if derived else []


def sncb_fix_midnight(df):
    """
    SNCB publishes post-midnight times as 24:xx, 25:xx, etc.
    Recompute normalized_secs so those wrap back into the 0-24h range.
    Operates on (and returns) a copy; the input frame is left untouched.
    """
    fixed = df.copy()
    fixed["normalized_secs"] = fixed["departure_secs"].apply(
        lambda secs: secs - 86400 if secs >= 86400 else secs
    )
    return fixed

def departure(stop_id, country, operator):
    """
    Build the departure board for one stop.

    Features:
    - operator-aware service-day start handling,
    - robust realtime normalization,
    - single-window selection using departure_secs,
    - vectorized merge of delays,
    - correct 24+ rollover to calendar dates,
    - safe re-filter after applying realtime delays,
    - Prague-specific fixes (operator == "pr").

    Returns a list of dicts (destination / direction_code / expected /
    line / deviations), also stored in v.operators[country][operator][stop_id].
    """
    # Ensure the per-country cache dict exists.
    v.cache.setdefault(country, {})

    # Check if cached static GTFS exists and is still valid.
    data = None
    cached = v.cache[country].get(operator)
    if isinstance(cached, dict) and "timestamp" in cached:
        age = time.time() - cached["timestamp"]
        max_age = cached.get("cache_age", 12 * 3600)  # Default 12 hours
        if age < max_age:
            data = cached

    if data is None:
        # Populates v.cache[country][operator]; re-read below.
        load_static_gtfs(country, operator)

    # Resolve URLs. Copy before substituting the "_SWE_" placeholder so the
    # shared URLS template is not mutated for the next operator.
    try:
        urls = URLS[country][operator]
    except KeyError:
        urls = dict(URLS[country])
        for key in ("gtfs_url", "trip_updates", "vehicle_positions"):
            urls[key] = urls[key].replace("_SWE_", operator)

    data = v.cache[country][operator]
    stops = data["stops"]
    trips = data["trips"]
    stop_times = data["stop_times"]
    if operator in ("sncb", "vbn", "wa"):
        # These feeds use 24:xx+ times; re-normalize before windowing.
        stop_times = sncb_fix_midnight(stop_times)
    routes = data["routes"]
    calendar_dates = data["calendar_dates"]

    # Load realtime and coerce it into (trip_id, stop_id) -> int seconds.
    rt_raw = load_realtime(urls, country, operator)
    rt_delays_clean = _normalize_rt_delays(rt_raw)

    if rt_delays_clean:
        rt_rows = [{"trip_id": k[0], "stop_id": k[1], "delay": int(d)} for k, d in rt_delays_clean.items()]
        rt_df = pd.DataFrame(rt_rows)
    else:
        rt_df = pd.DataFrame(columns=["trip_id", "stop_id", "delay"])

    # -----------------------------
    # SERVICE-DAY START HANDLING
    # -----------------------------
    SERVICE_DAY_START_HOUR = 3  # default for most operators

    # Prague: service day starts at midnight, no 24+ times
    if operator == "pr":
        SERVICE_DAY_START_HOUR = 0

    SERVICE_DAY_START_SECS = SERVICE_DAY_START_HOUR * 3600

    # pd.Timestamp.now() instead of datetime.datetime.now(): this module
    # imports the datetime *class* (from datetime import datetime), so
    # datetime.datetime raised AttributeError.
    now = pd.Timestamp.now()
    now_secs = now.hour * 3600 + now.minute * 60 + now.second
    cutoff = now_secs + HOURS * 3600

    if now_secs >= SERVICE_DAY_START_SECS:
        service_date = now.date()
    else:
        # Before the service-day start we are still on yesterday's timetable.
        service_date = (now - pd.Timedelta(days=1)).date()

    # -----------------------------
    # STOP-ID NORMALIZATION
    # -----------------------------
    platform_ids = normalize_stop_id(stop_id, stops, country=country, operator=operator)
    if not platform_ids:
        print("No matching stop_ids found for:", stop_id)
        return []

    # -----------------------------
    # WINDOW SELECTION
    # -----------------------------
    window = stop_times[
        (stop_times["departure_secs"].astype(int) >= now_secs) &
        (stop_times["departure_secs"].astype(int) <= cutoff) &
        (stop_times["stop_id"].astype(str).isin(platform_ids))
    ].copy()

    if window.empty:
        # Nothing in the plain window: retry with normalized (mod-24h) times
        # and pick up 24h+ trips that spill into the early morning.
        tomorrow_overflow = cutoff - 86400 if cutoff >= 86400 else 0
        window = stop_times[
            (
                (stop_times["normalized_secs"].astype(int) >= now_secs) &
                (stop_times["normalized_secs"].astype(int) <= now_secs + HOURS * 3600)
            ) |
            (
                (stop_times["departure_secs"].astype(int) >= 86400) &
                (stop_times["normalized_secs"].astype(int) <= tomorrow_overflow)
            )
        ]
        window = window[window["stop_id"].astype(str).isin(platform_ids)].copy()

    # -----------------------------
    # MERGE STATIC GTFS
    # -----------------------------
    future = (
        window.merge(trips, on="trip_id")
              .merge(routes, on="route_id")
              .merge(stops, on="stop_id")
    )

    # -----------------------------
    # SERVICE-ID FILTERING
    # -----------------------------
    operating_services = calendar_dates[calendar_dates["date"] == service_date]["service_id"].unique().tolist()

    if cutoff >= 86400:
        # The window crosses midnight: tomorrow's services count too.
        next_date = service_date + pd.Timedelta(days=1)
        tomorrow_services = calendar_dates[calendar_dates["date"] == next_date]["service_id"].unique().tolist()
        operating_services = list(set(operating_services + tomorrow_services))

    # Some feeds have unreliable calendars: only apply the filter when it
    # would keep at least one trip (otherwise show everything).
    if operator in ("vrn", "vvs", "pr", "wa", "leipzig", "all") or country in ("es", "pt"):
        overlap = set(operating_services) & set(future["service_id"].unique())
        if overlap:
            future = future[future["service_id"].isin(operating_services)].copy()
    elif operator == "barcelona":
        pass  # Barcelona: never filter by service_id
    else:
        future = future[future["service_id"].isin(operating_services)].copy()

    if future.empty:
        print("EMPTY after service_id filtering")
        return []

    # -----------------------------
    # MERGE REALTIME DELAYS
    # -----------------------------
    future["trip_id"] = future["trip_id"].astype(str)
    future["stop_id"] = future["stop_id"].astype(str)
    rt_df["trip_id"] = rt_df["trip_id"].astype(str)
    rt_df["stop_id"] = rt_df["stop_id"].astype(str)

    if not rt_df.empty:
        future = future.merge(rt_df, on=["trip_id", "stop_id"], how="left")
        future["delay"] = future["delay"].fillna(0).astype(int)
    else:
        future["delay"] = 0

    # -----------------------------
    # REALTIME SECONDS + RE-FILTER
    # -----------------------------
    future["rt_departure_secs"] = future["departure_secs"].astype(int) + future["delay"].astype(int)

    DAY = 86400
    if cutoff < DAY:
        # Keep departures in-window plus those pushed past midnight by delay.
        future = future[
            (
                (future["rt_departure_secs"] >= now_secs) &
                (future["rt_departure_secs"] <= cutoff)
            ) |
            (
                (future["rt_departure_secs"] >= DAY) &
                (future["rt_departure_secs"] <= DAY + cutoff)
            )
        ].copy()
    else:
        future = future[
            (future["rt_departure_secs"] >= now_secs) &
            (future["rt_departure_secs"] <= cutoff)
        ].copy()

    if future.empty:
        return []

    # -----------------------------
    # BUILD ISO8601 TIMES
    # -----------------------------
    rt = future["rt_departure_secs"].astype(int)
    day_offset = rt // DAY
    secs_in_day = rt % DAY

    base_date = pd.to_datetime(service_date)
    dates = base_date + pd.to_timedelta(day_offset, unit="D")
    times = pd.to_timedelta(secs_in_day, unit="s")
    future["real_time"] = (dates + times).dt.strftime("%Y-%m-%dT%H:%M:%S")

    future = future.sort_values("rt_departure_secs")

    # -----------------------------
    # OUTPUT
    # -----------------------------
    output = []

    def get_destination(row):
        """Best-effort headsign: stop > trip > route name > last stop name."""
        if pd.notna(row.get("stop_headsign")) and row["stop_headsign"].strip():
            return row["stop_headsign"]
        if pd.notna(row.get("trip_headsign")) and row["trip_headsign"].strip():
            return row["trip_headsign"]
        if pd.notna(row.get("route_long_name")) and row["route_long_name"].strip():
            return row["route_long_name"]
        seq = stop_times[stop_times["trip_id"] == row["trip_id"]]
        if len(seq):
            last_stop_id = seq.iloc[-1]["stop_id"]
            name = stops.loc[stops["stop_id"] == last_stop_id, "stop_name"]
            if len(name):
                return name.iloc[0]
        return ""

    for _, row in future.iterrows():
        # direction_id is "0"/"1" (or NaN/missing); exposed as 1/2, 0=unknown.
        try:
            direction_code = int(row["direction_id"]) + 1
        except (KeyError, ValueError, TypeError):
            direction_code = 0

        if operator == "sncb":
            print(get_destination(row).split("(")[0], int(row.get("drop_off_type", 0)), int(row.get("pickup_type", 0)))
            # Skip stops where passengers cannot board (pickup-only rules).
            if int(row.get("drop_off_type", 0)) == 0 and int(row.get("pickup_type", 0)) == 1:
                continue
            if int(row.get("drop_off_type", 0)) + int(row.get("pickup_type", 0)) == 2:
                continue

        item = {
            "destination": get_destination(row).split("(")[0],
            "direction_code": direction_code,
            "expected": row["real_time"],
            "line": {
                "id": str(row["route_short_name"]),
                "transport_mode": ROUTE_TYPE_NAMES.get(row["route_type_static"], row["route_type_static"])
            },
            "deviations": [],
        }
        # De-duplicate identical board entries (merged platforms).
        if item not in output:
            output.append(item)

    if output:
        v.operators[country][operator][stop_id] = output

    return output