🗂️ Finding Unique Values

Discovering unique values and their patterns is essential for data analysis! NumPy's unique functions help you find distinct elements, count occurrences, and analyze distributions to understand your data's composition and trends.

import numpy as np

# Customer ratings analysis
ratings = np.array([5, 3, 4, 5, 2, 4, 5, 3, 4, 1, 5, 4, 3, 5, 2])

# np.unique returns the sorted distinct values; return_counts pairs each
# value with the number of times it occurs.
unique_ratings, counts = np.unique(ratings, return_counts=True)

print(f"All ratings: {ratings}")
print(f"Unique ratings: {unique_ratings}")
print(f"Rating counts: {counts}")

# Per-rating breakdown: compute all shares in one vectorized step,
# then report each rating's count and percentage of responses.
total_responses = ratings.size
percentages = counts / total_responses * 100
for rating, count, percentage in zip(unique_ratings, counts, percentages):
    print(f"Rating {rating}: {count} responses ({percentage:.1f}%)")

🔍 Basic Unique Operations

Find distinct elements in arrays.

Simple Unique Values

import numpy as np

# Product categories
categories = np.array(['Electronics', 'Clothing', 'Books', 'Electronics', 
                      'Home', 'Books', 'Electronics', 'Clothing', 'Home'])

# Distinct category labels (np.unique also sorts them alphabetically).
unique_categories = np.unique(categories)
print(f"All categories: {categories}")
print(f"Unique categories: {unique_categories}")
print(f"Number of unique categories: {len(unique_categories)}")

# If deduplication shrank the array, at least one label was repeated.
has_duplicates = unique_categories.size != categories.size
print(f"Has duplicates: {has_duplicates}")

Unique with Counts

import numpy as np

# Website page visits
pages_visited = np.array(['home', 'products', 'about', 'home', 'contact', 
                         'products', 'home', 'products', 'about', 'home'])

# Distinct pages alongside how many visits each one received.
unique_pages, visit_counts = np.unique(pages_visited, return_counts=True)

print(f"📊 Page Visit Analysis:")
for idx, page in enumerate(unique_pages):
    print(f"  {page}: {visit_counts[idx]} visits")

# argmax picks the position of the largest count — the busiest page.
most_popular_idx = np.argmax(visit_counts)
print(f"\n🔥 Most popular: {unique_pages[most_popular_idx]} ({visit_counts[most_popular_idx]} visits)")

📈 Statistical Analysis

Use unique values for data distribution analysis.

Distribution Analysis

import numpy as np

# Survey responses (1-10 scale)
survey_data = np.array([7, 8, 6, 9, 7, 5, 8, 7, 9, 6, 8, 7, 9, 8, 6, 
                       7, 8, 9, 5, 7, 8, 6, 9, 7, 8])

# Distribution: distinct scores (sorted) and the frequency of each.
unique_scores, counts = np.unique(survey_data, return_counts=True)
total_responses = survey_data.size

print(f"📊 Survey Score Distribution:")
print(f"Total responses: {total_responses}")
# np.unique sorts its output, so the extremes sit at the two ends.
print(f"Score range: {unique_scores[0]} - {unique_scores[-1]}")

# One line per score: count, share of responses, and a text histogram bar
# scaled so every 2 percentage points adds one block character.
percentages = counts / total_responses * 100
for score, count, percentage in zip(unique_scores, counts, percentages):
    bar = '█' * int(percentage / 2)
    print(f"Score {score}: {count:2d} ({percentage:4.1f}%) {bar}")

# Key metrics: overall average and the modal (most frequent) score.
mean_score = np.mean(survey_data)
most_common_idx = np.argmax(counts)
print(f"\nAverage score: {mean_score:.1f}")
print(f"Most common: {unique_scores[most_common_idx]} ({counts[most_common_idx]} times)")

Quality Control

import numpy as np

# Manufacturing defect codes
defect_codes = np.array(['A01', 'B12', 'A01', 'C03', 'A01', 'B12', 
                        'D04', 'A01', 'B12', 'C03', 'A01', 'E05'])

# Tally how often each distinct defect code occurs.
unique_defects, defect_counts = np.unique(defect_codes, return_counts=True)
total_defects = len(defect_codes)

print(f"🔧 Quality Control Analysis:")
print(f"Total defects found: {total_defects}")
print(f"Unique defect types: {len(unique_defects)}")

# argsort gives ascending-count order; reversing yields most-frequent first.
defect_ranking = np.argsort(defect_counts)[::-1]

print(f"\n📉 Defect Frequency Ranking:")
for rank, idx in enumerate(defect_ranking, start=1):
    share = (defect_counts[idx] / total_defects) * 100
    print(f"  {rank}. {unique_defects[idx]}: {defect_counts[idx]} occurrences ({share:.1f}%)")

# Pareto-style focus list: walk the ranking and stop once the selected
# defects account for at least 80% of all occurrences.
cumulative_percentage = 0
focus_defects = []
for idx in defect_ranking:
    cumulative_percentage += (defect_counts[idx] / total_defects) * 100
    focus_defects.append(unique_defects[idx])
    if cumulative_percentage >= 80:
        break

print(f"\nFocus on these defects (80% of issues): {focus_defects}")

🎯 Multi-dimensional Unique Analysis

Find unique elements in multi-dimensional arrays.

Row-wise Unique Analysis

import numpy as np

# Customer purchase patterns: [customer_id, product_category, purchase_amount]
purchases = np.array([['C001', 'Electronics', '299'],
                     ['C002', 'Clothing', '89'],
                     ['C001', 'Books', '25'],
                     ['C003', 'Electronics', '450'],
                     ['C002', 'Electronics', '199'],
                     ['C001', 'Clothing', '120']])

# Slice the string table into its three columns.
customers = purchases[:, 0]
categories = purchases[:, 1]
# Numeric view of the amount column (parsed here; not used further below).
amounts = purchases[:, 2].astype(float)

# Purchases made by each distinct customer.
unique_customers, customer_counts = np.unique(customers, return_counts=True)
print(f"👥 Customer Analysis:")
for customer, count in zip(unique_customers, customer_counts):
    print(f"  {customer}: {count} purchases")

# Rank categories from most to least purchased.
unique_categories, category_counts = np.unique(categories, return_counts=True)
print(f"\n📦 Popular Categories:")
category_ranking = np.argsort(category_counts)[::-1]
for idx in category_ranking:
    print(f"  {unique_categories[idx]}: {category_counts[idx]} purchases")

Cross-tabulation Analysis

import numpy as np

# Employee data: [department, performance_rating]
employee_data = np.array([['Sales', 'A'], ['IT', 'B'], ['Sales', 'A'], 
                         ['Marketing', 'A'], ['IT', 'A'], ['Sales', 'B'],
                         ['Marketing', 'B'], ['IT', 'A'], ['Sales', 'A'],
                         ['Marketing', 'A']])

# Split the two columns of the table.
departments = employee_data[:, 0]
ratings = employee_data[:, 1]

# Headcount per distinct department.
unique_depts, dept_counts = np.unique(departments, return_counts=True)
print(f"🏢 Department Distribution:")
for dept, count in zip(unique_depts, dept_counts):
    print(f"  {dept}: {count} employees")

# Cross-tabulate: for each department, tally the ratings of its employees
# by masking the ratings column with that department's rows.
print(f"\n⭐ Performance by Department:")
for dept in unique_depts:
    dept_ratings = ratings[departments == dept]
    unique_ratings, rating_counts = np.unique(dept_ratings, return_counts=True)

    print(f"  {dept}:")
    for rating, count in zip(unique_ratings, rating_counts):
        percentage = 100.0 * count / len(dept_ratings)
        print(f"    Rating {rating}: {count} ({percentage:.1f}%)")

🔍 Advanced Unique Operations

Explore sophisticated unique value analysis.

Finding Duplicates

import numpy as np

# Email list with potential duplicates
emails = np.array(['user1@email.com', 'user2@email.com', 'user1@email.com',
                  'user3@email.com', 'user2@email.com', 'user4@email.com',
                  'user1@email.com', 'user5@email.com'])

# Deduplicate and count occurrences of every address.
# Fix: dropped return_index=True — the first-occurrence indices it returned
# were never used anywhere below.
unique_emails, counts = np.unique(emails, return_counts=True)

print(f"📧 Email Analysis:")
print(f"Total emails: {len(emails)}")
print(f"Unique emails: {len(unique_emails)}")
print(f"Duplicates found: {len(emails) - len(unique_emails)}")

# Addresses appearing more than once, paired directly with their counts.
# Fix: zip the aligned arrays instead of re-scanning unique_emails with a
# boolean comparison for every duplicate (was O(n) per lookup).
duplicated = counts > 1
duplicates = unique_emails[duplicated]
if len(duplicates) > 0:
    print(f"\n🔄 Duplicate emails:")
    for email, count in zip(duplicates, counts[duplicated]):
        print(f"  {email}: appears {count} times")
else:
    print("\n✅ No duplicates found")

Set Operations

import numpy as np

# Two customer groups
group_a_customers = np.array(['C001', 'C002', 'C003', 'C004', 'C005'])
group_b_customers = np.array(['C003', 'C004', 'C006', 'C007', 'C008'])

print(f"Group A customers: {group_a_customers}")
print(f"Group B customers: {group_b_customers}")

# NumPy's set routines treat each array as a set of labels:
#   union1d    -> in either group
#   intersect1d-> in both groups
#   setdiff1d  -> in the first group but not the second
all_customers = np.union1d(group_a_customers, group_b_customers)
common_customers = np.intersect1d(group_a_customers, group_b_customers)
only_a = np.setdiff1d(group_a_customers, group_b_customers)
only_b = np.setdiff1d(group_b_customers, group_a_customers)

print(f"\n🔗 Customer Analysis:")
print(f"Common to both groups: {common_customers}")
print(f"All unique customers: {all_customers}")
print(f"Only in Group A: {only_a}")
print(f"Only in Group B: {only_b}")

# Share of the combined customer base that belongs to both groups.
total_unique = len(all_customers)
overlap_rate = 100.0 * len(common_customers) / total_unique
print(f"\nOverlap rate: {overlap_rate:.1f}%")

🏷️ Data Cleaning Applications

Use unique analysis for data validation and cleaning.

Data Validation

import numpy as np

# Product data with potential issues
product_data = np.array(['Laptop', 'laptop', 'LAPTOP', 'Mouse', 'mouse',
                        'Keyboard', 'Monitor', 'monitor', 'Speaker'])

print(f"📦 Product Data Validation:")
print(f"Raw data: {product_data}")

# Lower-case everything so 'Laptop', 'laptop' and 'LAPTOP' collapse together.
normalized_data = np.char.lower(product_data)
unique_normalized, counts = np.unique(normalized_data, return_counts=True)

print(f"\n🔍 After normalization:")
inconsistencies = []
for product, count in zip(unique_normalized, counts):
    if count == 1:
        # Only one raw spelling maps here — nothing to reconcile.
        print(f"  '{product}': {count} entry (clean)")
        continue
    # Several raw spellings collapsed onto this normalized name; list them.
    unique_variations = np.unique(product_data[normalized_data == product])
    print(f"  '{product}': {count} entries, variations: {unique_variations}")
    # Treat the first variation (sort order) as canonical and flag the rest.
    inconsistencies.extend(unique_variations[1:])

if inconsistencies:
    print(f"\n⚠️ Inconsistencies to fix: {inconsistencies}")
else:
    print("\n✅ Data is consistent")

Category Analysis

import numpy as np

# Sales data by region and product
sales_data = np.array([['North', 'Electronics', 15000],
                      ['South', 'Clothing', 8000],
                      ['North', 'Books', 3000],
                      ['East', 'Electronics', 12000],
                      ['West', 'Clothing', 9500],
                      ['South', 'Electronics', 11000],
                      ['North', 'Clothing', 7500]])

# Mixed rows are stored as strings by np.array; split the columns and
# convert the amount column back to numbers.
regions = sales_data[:, 0]
products = sales_data[:, 1]
amounts = sales_data[:, 2].astype(float)

# Total and average revenue per distinct region.
unique_regions = np.unique(regions)
print(f"🌎 Regional Performance:")
for region in unique_regions:
    region_sales = amounts[regions == region]
    region_total = region_sales.sum()
    region_avg = region_sales.mean()
    print(f"  {region}: ${region_total:,.0f} total, ${region_avg:,.0f} average")

# Revenue and regional reach per distinct product line.
unique_products = np.unique(products)
print(f"\n📊 Product Performance:")
for product in unique_products:
    product_mask = products == product
    total_sales = amounts[product_mask].sum()
    regions_selling = np.unique(regions[product_mask]).size
    print(f"  {product}: ${total_sales:,.0f} total, sold in {regions_selling} regions")

🎯 Key Takeaways

🧠 Real-World Impact

Understanding unique values helps you:

  • Clean messy data by finding inconsistencies
  • Analyze distributions to understand patterns
  • Detect anomalies through frequency analysis
  • Optimize business processes based on popular categories

🚀 What's Next?

Excellent work mastering array aggregation! You've covered the fundamental tools for data summarization and analysis.

Ready for the next section? Learn about Random Number Generation and advanced NumPy topics, or review any aggregation concepts you'd like to practice more.

Congratulations! You've completed the Array Aggregation section! 🎉

Was this helpful?

😔Poor
🙁Fair
😊Good
😄Great
🤩Excellent