
The open API I am using is linked below:

https://npiregistry.cms.hhs.gov/api/?version=2.1

With the query below, even though the details in the input file are being matched, I am not getting the NPI in the output. Can anyone help me modify the query?

Input data:

S No|organization_name|city|state|Primary Address
1|saint|mishawaka|in|201 linc
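For reference, here is a minimal standalone request for this sample row, a sketch only: it assumes the public NPI Registry endpoint at https://npiregistry.cms.hhs.gov/api/ and uses a trailing * wildcard on the organization name (an assumption, since the input only has the partial value "saint"). It can be used to inspect what the API actually returns before any filtering:

import requests

# Query the registry directly for the sample row and print the raw matches,
# so the filtering logic in the script below can be checked against real responses.
resp = requests.get(
    "https://npiregistry.cms.hhs.gov/api/",
    params={
        "version": "2.1",
        "organization_name": "saint*",  # wildcard is an assumption; the input only has a partial name
        "city": "mishawaka",
        "state": "in",
        "limit": 10,
    },
)
for rec in resp.json().get("results", []):
    addr = rec["addresses"][0]
    print(rec["number"], rec["basic"].get("organization_name"), addr["address_1"])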

Query:

import requests
import pandas as pd
from google.colab import files
import csv
from typing import List, Dict

def search_nppes(organization_name, city, state, primary_address) -> List[Dict]:
    """
    Search the NPPES API for matches based on organization name, city, state,
    and a partial match of the primary address.

    Returns:
        List[Dict]: List of dictionaries containing the NPPES data for each matching record
    """
    base_url = "https://npiregistry.cms.hhs.gov/api/"  # public NPI Registry endpoint
    params = {
        "version": "2.1",
        "organization_name": organization_name,
        "enumeration_type": 2,
        "city": city,
        "state": state,
        "limit": 100
    }
    response = requests.get(base_url, params=params)

    if response.status_code == 200:
        data = response.json()
        if 'results' in data and data['results']:
            results = []
            for result in data['results']:
                # Build an "address_1, city, state" string from the first listed address.
                practice_location = f"{result['addresses'][0]['address_1']}, {result['addresses'][0]['city']}, {result['addresses'][0]['state']}"
                # Keep only results that contain the first characters of the input address
                # and whose organization name, city and state match the input exactly.
                if primary_address.lower()[:5] in practice_location.lower():
                    if (result['basic']['organization_name'].lower() == organization_name.lower() and
                            result['addresses'][0]['city'].lower() == city.lower() and
                            result['addresses'][0]['state'].lower() == state.lower()):
                        results.append({
                            'npi': result['number'],
                            'organization_name': result['basic']['organization_name'],
                            'taxonomy': result['taxonomies'][0]['desc'] if result['taxonomies'] else '',
                            'address': practice_location
                        })
            return results
        return []
    return []  # request failed or no results
uploaded = files.upload()

input_filename = list(uploaded.keys())[0]

with open(input_filename, 'r') as csvfile:
    # Detect the delimiter (the sample input is pipe-delimited).
    dialect = csv.Sniffer().sniff(csvfile.read(1024))
    delimiter = dialect.delimiter

df = pd.read_csv(input_filename, sep=delimiter)

required_columns = ['S No', 'organization_name', 'city', 'state', 'Primary Address']
missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
    print(f"Error: The following required columns are missing from your CSV: {', '.join(missing_columns)}")
    print(f"Your CSV contains the following columns: {', '.join(df.columns)}")
    print(f"Please make sure your CSV file has the columns: {', '.join(required_columns)}")
else:
    print(f"Successfully loaded CSV with delimiter: '{delimiter}'")
    print(f"Columns found: {', '.join(df.columns)}")

results = []
for index, row in df.iterrows():
    s_no = row['S No']
    primary_address = row['Primary Address']
    search_results = search_nppes(row['organization_name'], row['city'], row['state'], primary_address)
    if search_results:
        for result in search_results:
            result['S No'] = s_no
            results.append(result)

    if (index + 1) % 10 == 0:
        print(f"Processed {index + 1} entries...")

output_df = pd.DataFrame(results)

output_filename = 'nppes_search_results.csv'
output_df.to_csv(output_filename, index=False)
print(f"Results saved to {output_filename}")

files.download(output_filename)
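To narrow down whether the issue is the API request itself or the in-function filtering, the search function can also be called on its own for the sample row; the argument values below are simply the fields from the input line above:

# Quick standalone check using the values from the sample input row.
sample = search_nppes("saint", "mishawaka", "in", "201 linc")
print(f"Matches for sample row: {len(sample)}")
for rec in sample:
    print(rec['npi'], rec['organization_name'], rec['address'])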
