I’m trying to deploy my API, which works perfectly when I run it locally. When I deploy it and try to access the movie_predict endpoint, the service gets restarted. Is there something I need to configure?
import unicodedata
from functools import lru_cache
from traceback import print_exception

import pandas as pd
from fastapi import FastAPI, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

from utils.string import name_treat, gruped_metadados, tagline_treat, remover_pontuacao
app = FastAPI()

# Allow browser clients from any origin to call the GET/POST endpoints.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers under the CORS spec (credentialed requests need an
# explicit origin) — confirm whether credentials are actually required.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["POST", "GET"],
    allow_headers=["*"],
    expose_headers=["*"],
)
@app.get("/movie_predict/{movie_name}")
async def simple_recommendation(movie_name: str):
    """Return up to 10 movies similar to *movie_name* (overview-based).

    Responds 200 with ``{'movies': [...]}`` on success; any failure is
    logged and reported as a generic 500 so internals are not leaked.
    """
    try:
        recommendations = simple_movie_recommendation(movie_name)
        return JSONResponse(
            status_code=200,
            content={'movies': recommendations},
        )
    except Exception as exc:
        # Boundary handler: log the traceback, hide details from the client.
        print_exception(exc)
        return JSONResponse(
            content={'message': 'Internal server error'},
            status_code=500,
        )
@app.get("/movies_list/")
async def movie_list():
    """Return the alphabetically sorted titles of movies with >= 50 votes.

    Responds 200 with ``{'movies': [...]}`` on success; any failure is
    logged and reported as a generic 500.
    """
    try:
        df = data()
        # Only reasonably-voted titles; unique() drops duplicates.
        popular_titles = df[df.vote_count >= 50].original_title.unique()
        return JSONResponse(
            status_code=200,
            content={'movies': sorted(popular_titles)},
        )
    except Exception as exc:
        # Boundary handler: log the traceback, hide details from the client.
        print_exception(exc)
        return JSONResponse(
            content={'message': 'Internal server error'},
            status_code=500,
        )
@lru_cache(maxsize=1)
def data():
    """Load the TMDB movies dataset, normalizing titles to NFKD.

    The CSV lives on GitHub; the original code re-downloaded it on EVERY
    request, which is slow and can exhaust a small deployment. The result
    is cached for the process lifetime (maxsize=1: there is only one call
    signature).

    Returns:
        pd.DataFrame: the movies table with ``original_title`` normalized.

    NOTE(review): callers must treat the returned frame as read-only —
    it is shared across requests because of the cache.
    """
    movies = pd.read_csv(
        'https://raw.githubusercontent.com/alexvaroz/data_science_alem_do_basico/master/tmdb_movies_data.csv')
    # Unicode-normalize titles so lookups by title are consistent.
    movies.original_title = movies.original_title.apply(
        lambda x: unicodedata.normalize('NFKD', x))
    return movies
def recommendation_by_content(title, cosine_sim, df, indice):
    """Return the titles of the 10 movies most similar to *title*.

    Args:
        title: title to look up in *indice*.
        cosine_sim: square similarity matrix, row-indexable by position.
        df: frame holding an ``original_title`` column.
        indice: Series mapping title -> row position.

    The most similar entry (the movie itself) is skipped.
    """
    row = indice[title]
    ranked = sorted(
        enumerate(list(cosine_sim[row])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    # Drop rank 0 (the movie itself); keep the next 10 positions.
    top_positions = [pos for pos, _score in ranked[1:11]]
    return list(df['original_title'].loc[top_positions])
def simple_movie_recommendation(title):
    """Return the 10 movie titles whose overviews are most similar to *title*.

    Fits a TF-IDF model over all overviews, then computes cosine similarity
    between *title*'s overview and every other overview.

    Why this shape: the original code called ``linear_kernel(tfidf_matriz)``,
    which materializes the full dense n x n cosine matrix (~800 MB for a
    ~10k-movie catalogue) on EVERY request — enough to OOM-kill a small
    deployment (the "service is restarted" symptom). Here only a single
    1 x n similarity row is ever materialized.

    Args:
        title: an ``original_title`` present in the dataset.

    Returns:
        list[str]: up to 10 recommended titles, best match first.

    Raises:
        KeyError: if *title* is not found in the dataset.
    """
    movies = data()
    # Work on a local Series instead of mutating the (possibly cached)
    # shared DataFrame in place.
    overviews = movies['overview'].fillna('')
    tfidf = TfidfVectorizer(stop_words='english')
    tfidf_matrix = tfidf.fit_transform(overviews)
    reversed_index = pd.Series(
        movies.index, index=movies.original_title).drop_duplicates()
    idx = reversed_index[title]
    # 1 x n similarity row — avoids the full n x n dense matrix.
    sim_row = linear_kernel(tfidf_matrix[idx], tfidf_matrix).ravel()
    ranked = sorted(enumerate(sim_row), key=lambda pair: pair[1], reverse=True)
    # Skip rank 0 (the movie itself); keep the next 10.
    top_positions = [pos for pos, _score in ranked[1:11]]
    return list(movies['original_title'].loc[top_positions])