Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 16 additions & 3 deletions src/nba_api/library/http.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,15 @@ def __init__(self, response, status_code, url):
self._response = response
self._status_code = status_code
self._url = url
self._dict_cache = None

def get_response(self):
    """Return the stored raw response body exactly as received."""
    return self._response

def get_dict(self):
    """Return the response body parsed as JSON, caching the parsed dict.

    Parsing happens at most once; subsequent calls return the cached
    object. Note callers share the same dict instance, so mutations are
    visible across calls.
    """
    # Diff-artifact fix: the stale pre-change `return json.loads(...)` line
    # preceded the cache logic and made it unreachable; it is removed here.
    if self._dict_cache is None:
        self._dict_cache = json.loads(self._response)
    return self._dict_cache

def get_json(self):
    """Return the response payload re-serialized as a JSON string."""
    payload = self.get_dict()
    return json.dumps(payload)
Expand All @@ -54,6 +57,9 @@ def valid_json(self):
def get_url(self):
    """Return the URL this response was fetched from."""
    return self._url

def get_status_code(self):
    """Return the HTTP status code recorded for this response."""
    return self._status_code


class NBAHTTP:
nba_response = NBAResponse
Expand Down Expand Up @@ -173,7 +179,14 @@ def send_api_request(

data = self.nba_response(response=contents, status_code=status_code, url=url)

if raise_exception_on_error and not data.valid_json():
raise Exception("InvalidResponse: Response is not in a valid JSON format.")
if raise_exception_on_error:
if status_code is not None and status_code >= 400:
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why the additional breakout of the exception?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The code now checks the HTTP status code first (any >= 400) raising an Exception with the status code, before
validating JSON. This ensures HTTP errors are properly caught.

raise Exception(
f"HTTPError: Request failed with status code {status_code}."
)
if not data.valid_json():
raise Exception(
"InvalidResponse: Response is not in a valid JSON format."
)

return data
4 changes: 3 additions & 1 deletion src/nba_api/live/nba/endpoints/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
class Endpoint:
class DataSet:
key = None
data = {}

def __init__(self, data=None):
if data is None:
Expand All @@ -23,6 +22,9 @@ def get_request_url(self):
def get_response(self):
    """Return the raw response string from the underlying NBA response."""
    return self.nba_response.get_response()

def get_status_code(self):
    """Return the HTTP status code from the underlying NBA response."""
    return self.nba_response.get_status_code()

def get_dict(self):
    """Return the parsed-dict form of the underlying NBA response."""
    return self.nba_response.get_dict()

Expand Down
5 changes: 4 additions & 1 deletion src/nba_api/stats/endpoints/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
class Endpoint:
class DataSet:
key: str | None = None
data: dict[str, Any] = {}

def __init__(self, data: dict[str, Any]) -> None:
self.data = data
Expand Down Expand Up @@ -88,6 +87,10 @@ def get_response(self) -> str:
"""Return the raw response string."""
return self.nba_response.get_response()

def get_status_code(self) -> int:
    """HTTP status code reported by the underlying NBA response object."""
    response = self.nba_response
    return response.get_status_code()

def get_dict(self) -> dict[str, Any]:
    """Dictionary form of the response, delegated to the NBA response object."""
    payload = self.nba_response.get_dict()
    return payload
Expand Down
15 changes: 8 additions & 7 deletions src/nba_api/stats/library/http.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def get_normalized_dict(self):
endpoint_parser = get_parser_for_endpoint(self._endpoint, raw_data)
for name, dataset in endpoint_parser.get_data_sets().items():
data[name] = self._build_rows(dataset["headers"], dataset["data"])
except (KeyError, ImportError):
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why remove the import error here? To be honest, I'm not sure why it's even catching it here in the first place

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the import of get_parser_for_endpoint fails, that's a critical issue that should raise an exception, not silently
continue with an empty result

except KeyError:
pass

return data
Expand All @@ -70,18 +70,19 @@ def get_normalized_json(self):
return json.dumps(self.get_normalized_dict())

def get_parameters(self):
    """Return the request parameters embedded in the response.

    Returns the `parameters` entry as a dict. If the entry is a list of
    single-key dicts, the pairs are flattened into one dict (later keys
    overwrite earlier ones). Returns None when the response is not valid
    JSON or has no `parameters` key.
    """
    # Diff-artifact fix: the pasted span interleaved the old and new
    # implementations (two condition lines, two accumulators); this is
    # the resolved post-change version, which also parses the JSON once.
    raw = self.get_dict() if self.valid_json() else None
    if raw is None or "parameters" not in raw:
        return None

    parameters = raw["parameters"]
    if isinstance(parameters, dict):
        return parameters

    result = {}
    for parameter in parameters:
        for key, value in parameter.items():
            result[key] = value
    return result

def get_headers_from_data_sets(self):
raw_dict = self.get_dict()
Expand Down
37 changes: 16 additions & 21 deletions src/nba_api/stats/static/players.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,9 @@
wnba_players,
)


def _find_players(regex_pattern, row_id, players=players):
players_found = []
for player in players:
if re.search(
_strip_accents(regex_pattern),
_strip_accents(str(player[row_id])),
flags=re.I,
):
players_found.append(_get_player_dict(player))
return players_found
# Pre-built index for O(1) ID lookup
# (maps player id -> raw player row; assumes ids are unique per league — TODO confirm)
_players_by_id = {p[player_index_id]: p for p in players}
_wnba_players_by_id = {p[player_index_id]: p for p in wnba_players}


def _strip_accents(inputstr: str) -> str:
Expand All @@ -36,15 +28,18 @@ def _strip_accents(inputstr: str) -> str:
)


def _find_player_by_id(player_id, players=players):
regex_pattern = f"^{player_id}$"
players_list = _find_players(regex_pattern, player_index_id, players=players)
if len(players_list) > 1:
raise Exception("Found more than 1 id")
elif not players_list:
return None
else:
return players_list[0]
def _find_players(regex_pattern, row_id, players=players):
    """Return player dicts whose `row_id` field matches the pattern.

    Matching is case-insensitive and accent-insensitive: both the pattern
    and each field value are accent-stripped before the regex search.
    """
    pattern = re.compile(_strip_accents(regex_pattern), flags=re.I)
    return [
        _get_player_dict(player)
        for player in players
        if pattern.search(_strip_accents(str(player[row_id])))
    ]


def _find_player_by_id(player_id, _index=_players_by_id):
    """Look up one player by exact id via the prebuilt index; None if absent."""
    match = _index.get(player_id)
    if match is None:
        return None
    return _get_player_dict(match)


def _get_players(players=players):
Expand Down Expand Up @@ -121,7 +116,7 @@ def find_wnba_players_by_last_name(regex_pattern):


def find_wnba_player_by_id(player_id):
    """Find a WNBA player by exact id; returns a player dict or None."""
    # Diff-artifact fix: the pasted span kept both the old (`players=wnba_players`)
    # and new return lines; this is the resolved post-change version.
    return _find_player_by_id(player_id, _index=_wnba_players_by_id)


def get_wnba_players():
Expand Down
52 changes: 27 additions & 25 deletions src/nba_api/stats/static/teams.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import re
import unicodedata

from nba_api.stats.library.data import (
team_index_abbreviation,
Expand All @@ -13,42 +14,43 @@
wnba_teams,
)

# Pre-built indexes for O(1) lookups
# (keyed by team id and by abbreviation; assumes both are unique per team — TODO confirm)
_teams_by_id = {t[team_index_id]: t for t in teams}
_teams_by_abbreviation = {t[team_index_abbreviation]: t for t in teams}
_wnba_teams_by_id = {t[team_index_id]: t for t in wnba_teams}
_wnba_teams_by_abbreviation = {t[team_index_abbreviation]: t for t in wnba_teams}


def _strip_accents(inputstr: str) -> str:
normalizedstr = unicodedata.normalize("NFD", inputstr)
return "".join(c for c in normalizedstr if unicodedata.category(c) != "Mn")


def _find_teams(regex_pattern, row_id, teams=teams):
    """Return team dicts whose `row_id` field matches the pattern.

    Matching is case-insensitive and accent-insensitive; the pattern is
    compiled once outside the loop.
    """
    # Diff-artifact fix: the pasted span retained the old uncompiled
    # `re.search(regex_pattern, ...)` condition alongside the new compiled
    # one; this is the resolved post-change version.
    compiled = re.compile(_strip_accents(regex_pattern), flags=re.I)
    teams_found = []
    for team in teams:
        if compiled.search(_strip_accents(str(team[row_id]))):
            teams_found.append(_get_team_dict(team))
    return teams_found


def _find_team_name_by_id(team_id, teams=teams):
regex_pattern = f"^{team_id}$"
teams_list = _find_teams(regex_pattern, team_index_id, teams=teams)
if len(teams_list) > 1:
raise Exception("Found more than 1 id")
elif not teams_list:
return None
else:
return teams_list[0]
def _find_team_name_by_id(team_id, _index=_teams_by_id):
    """Return the team dict for an exact team id, or None when unknown."""
    team = _index.get(team_id)
    return None if team is None else _get_team_dict(team)


def _find_team_by_abbreviation(abbreviation, teams=teams):
regex_pattern = f"^{abbreviation}$"
teams_list = _find_teams(regex_pattern, team_index_abbreviation, teams=teams)
if len(teams_list) > 1:
raise Exception("Found more than 1 id")
elif not teams_list:
return None
else:
return teams_list[0]
def _find_team_by_abbreviation(abbreviation, _index=_teams_by_abbreviation):
    """Return the team dict for an abbreviation, or None when unknown.

    Lookup uppercases the input first — assumes stored abbreviations are
    uppercase (TODO confirm against the data module).
    """
    team = _index.get(abbreviation.upper())
    return None if team is None else _get_team_dict(team)


def _find_teams_by_championship_year(year, teams=teams):
    """Return team dicts for every team with a championship in `year`.

    Membership test `year in ...` is against each team's championship-year
    field; an empty list is returned when no team matches.
    """
    # Diff-artifact fix: the pasted span interleaved the old loop (which
    # returned only the first match's full name) with the new comprehension;
    # this is the resolved post-change version returning all matches.
    return [
        _get_team_dict(team)
        for team in teams
        if year in team[team_index_championship_year]
    ]


def _find_teams_by_year_founded(year, teams=teams):
Expand Down Expand Up @@ -139,11 +141,11 @@ def find_wnba_teams_by_championship_year(year):


def find_wnba_team_by_abbreviation(abbreviation):
    """Find a WNBA team by abbreviation; returns a team dict or None."""
    # Diff-artifact fix: resolved to the post-change return line (the old
    # `teams=wnba_teams` variant was also present in the pasted span).
    return _find_team_by_abbreviation(abbreviation, _index=_wnba_teams_by_abbreviation)


def find_wnba_team_name_by_id(team_id):
    """Find a WNBA team by exact id; returns a team dict or None."""
    # Diff-artifact fix: resolved to the post-change return line (the old
    # `teams=wnba_teams` variant was also present in the pasted span).
    return _find_team_name_by_id(team_id, _index=_wnba_teams_by_id)


def get_wnba_teams():
Expand Down